text
stringlengths 2
100k
| meta
dict |
---|---|
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gitiles;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.html.types.SafeHtml;
import com.google.gitiles.DateFormatter.Format;
import com.google.gitiles.GitilesRequestFailureException.FailureReason;
import com.google.gitiles.doc.MarkdownConfig;
import com.google.gson.reflect.TypeToken;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.http.server.ServletUtils;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevTree;
import org.eclipse.jgit.revwalk.RevWalk;
/** Serves the index page for a repository, if accessed directly by a browser. */
public class RepositoryIndexServlet extends BaseServlet {
  private static final long serialVersionUID = 1L;

  /** Maximum number of branches/tags rendered inline; beyond this a "more" link is offered. */
  static final int REF_LIMIT = 10;

  /** Number of log entries shown when the page has no README. */
  private static final int LOG_LIMIT = 20;

  /** Shorter log used when a README is also rendered, so the README dominates the page. */
  private static final int LOG_WITH_README_LIMIT = 5;

  private final TimeCache timeCache;

  public RepositoryIndexServlet(
      GitilesAccess.Factory accessFactory, Renderer renderer, TimeCache timeCache) {
    super(renderer, accessFactory);
    this.timeCache = checkNotNull(timeCache, "timeCache");
  }

  @Override
  protected void doHead(HttpServletRequest req, HttpServletResponse res) throws IOException {
    // If the repository didn't exist a prior filter would have 404 replied.
    Optional<FormatType> format = getFormat(req);
    if (!format.isPresent()) {
      throw new GitilesRequestFailureException(FailureReason.UNSUPPORTED_RESPONSE_FORMAT);
    }
    switch (format.get()) {
      case HTML:
      case JSON:
        // Only HTML and JSON bodies are produced by the GET handlers below.
        res.setStatus(HttpServletResponse.SC_OK);
        res.setContentType(format.get().getMimeType());
        break;
      case TEXT:
      case DEFAULT:
      default:
        throw new GitilesRequestFailureException(FailureReason.UNSUPPORTED_RESPONSE_FORMAT);
    }
  }

  @Override
  protected void doGetHtml(HttpServletRequest req, HttpServletResponse res) throws IOException {
    GitilesView view = ViewFilter.getView(req);
    Repository repo = ServletUtils.getRepository(req);
    GitilesAccess access = getAccess(req);
    RepositoryDescription desc = access.getRepositoryDescription();
    try (RevWalk walk = new RevWalk(repo)) {
      Paginator paginator = null;
      // Up to 11 entries may be added below: readmeHtml, entries, cloneUrl, mirroredFromUrl,
      // description, branches, moreBranchesUrl, tags, hasLog, moreTagsUrl, logEntryVariant.
      Map<String, Object> data = Maps.newHashMapWithExpectedSize(11);
      List<Map<String, Object>> tags = RefServlet.getTagsSoyData(req, timeCache, walk, REF_LIMIT);
      ObjectId headId = repo.resolve(Constants.HEAD);
      if (headId != null) {
        RevObject head = walk.parseAny(headId);
        int limit = LOG_LIMIT;
        Map<String, Object> readme = renderReadme(req, walk, view, access.getConfig(), head);
        if (readme != null) {
          data.putAll(readme);
          limit = LOG_WITH_README_LIMIT;
        }
        // TODO(dborowitz): Handle non-commit or missing HEAD?
        if (head.getType() == Constants.OBJ_COMMIT) {
          walk.reset();
          walk.markStart((RevCommit) head);
          paginator = new Paginator(walk, limit, null);
        }
      }
      if (!data.containsKey("entries")) {
        data.put("entries", ImmutableList.of());
      }
      List<Map<String, Object>> branches = RefServlet.getBranchesSoyData(req, REF_LIMIT);
      data.put("cloneUrl", desc.cloneUrl);
      data.put("mirroredFromUrl", Strings.nullToEmpty(desc.mirroredFromUrl));
      data.put("description", Strings.nullToEmpty(desc.description));
      data.put("branches", trim(branches));
      if (branches.size() > REF_LIMIT) {
        data.put("moreBranchesUrl", GitilesView.refs().copyFrom(view).toUrl());
      }
      data.put("tags", trim(tags));
      data.put("hasLog", paginator != null);
      if (tags.size() > REF_LIMIT) {
        data.put("moreTagsUrl", GitilesView.refs().copyFrom(view).toUrl());
      }
      // Reuse the GitilesAccess already obtained above instead of a second getAccess(req) lookup.
      GitilesConfig.putVariant(access.getConfig(), "logEntry", "logEntryVariant", data);
      if (paginator != null) {
        // Stream the log portion of the page so long histories render incrementally.
        DateFormatter df = new DateFormatter(access, Format.DEFAULT);
        try (OutputStream out =
            startRenderStreamingHtml(req, res, "gitiles.repositoryIndex", data)) {
          Writer w = newWriter(out, res);
          new LogSoyData(req, access, "oneline")
              .renderStreaming(
                  paginator, "HEAD", renderer, w, df, LogSoyData.FooterBehavior.LOG_HEAD);
          w.flush();
        }
      } else {
        renderHtml(req, res, "gitiles.repositoryIndex", data);
      }
    }
  }

  @Override
  protected void doGetJson(HttpServletRequest req, HttpServletResponse res) throws IOException {
    GitilesAccess access = getAccess(req);
    RepositoryDescription desc = access.getRepositoryDescription();
    renderJson(req, res, desc, new TypeToken<RepositoryDescription>() {}.getType());
  }

  /**
   * Returns at most {@link #REF_LIMIT} elements of {@code list}.
   *
   * <p>Note: returns a {@code subList} view of the original list, not a copy.
   */
  private static <T> List<T> trim(List<T> list) {
    return list.size() > REF_LIMIT ? list.subList(0, REF_LIMIT) : list;
  }

  /**
   * Renders the repository README found at HEAD, if any.
   *
   * @return soy data map containing a single {@code "readmeHtml"} entry, or {@code null} if HEAD
   *     is not treeish or no renderable README was found.
   * @throws IOException on repository read errors.
   */
  private static Map<String, Object> renderReadme(
      HttpServletRequest req, RevWalk walk, GitilesView view, Config cfg, RevObject head)
      throws IOException {
    RevTree rootTree;
    try {
      rootTree = walk.parseTree(head);
    } catch (IncorrectObjectTypeException notTreeish) {
      // HEAD does not resolve to a tree (e.g. points at a blob); nothing to render.
      return null;
    }
    ReadmeHelper readme =
        new ReadmeHelper(
            walk.getObjectReader(),
            GitilesView.path().copyFrom(view).setRevision(Revision.HEAD).setPathPart("/").build(),
            MarkdownConfig.get(cfg),
            rootTree,
            req.getRequestURI());
    readme.scanTree(rootTree);
    if (readme.isPresent()) {
      SafeHtml html = readme.render();
      if (html != null) {
        return ImmutableMap.<String, Object>of("readmeHtml", html);
      }
    }
    return null;
  }
}
| {
"pile_set_name": "Github"
} |
p edge 18 21
e 0 16
e 0 5
e 2 1
e 2 4
e 3 12
e 5 15
e 5 4
e 6 14
e 6 15
e 8 14
e 8 10
e 9 12
e 9 7
e 11 1
e 11 7
e 13 10
e 13 1
e 16 3
e 17 7
e 17 15
e 17 4
| {
"pile_set_name": "Github"
} |
---
title: "<sstream> functions"
ms.date: "11/04/2016"
f1_keywords: ["sstream/std::swap"]
ms.assetid: bc9607e8-7c6b-44ef-949b-19e917b450ad
---
# &lt;sstream&gt; functions
[swap](#sstream_swap)
## <a name="sstream_swap"></a> swap
Exchanges the values between two `sstream` objects.
```cpp
template <class Elem, class Tr, class Alloc>
void swap(
basic_stringbuf<Elem, Tr, Alloc>& left,
basic_stringbuf<Elem, Tr, Alloc>& right);
template <class Elem, class Tr, class Alloc>
void swap(
basic_istringstream<Elem, Tr, Alloc>& left,
basic_istringstream<Elem, Tr, Alloc>& right);
template <class Elem, class Tr, class Alloc>
void swap(
basic_ostringstream<Elem, Tr, Alloc>& left,
basic_ostringstream<Elem, Tr, Alloc>& right);
template <class Elem, class Tr, class Alloc>
void swap(
basic_stringstream<Elem, Tr, Alloc>& left,
basic_stringstream<Elem, Tr, Alloc>& right);
```
### Parameters
*left*\
Reference to an `sstream` object.
*right*\
Reference to an `sstream` object.
### Remarks
The template function executes `left.swap(right)`.
## See also
[\<sstream>](../standard-library/sstream.md)
| {
"pile_set_name": "Github"
} |
using System;
namespace Vanara.PInvoke
{
public static partial class User32
{
		/// <summary>Base value for ComboBoxEx (CBEN_*) notification codes; specific codes are negative offsets from this value.</summary>
		public const int CBEN_FIRST = -800;
		/// <summary>Base value for combo box (CB_*) messages added in later common-control versions (e.g. CB_SETMINVISIBLE = CBM_FIRST + 1).</summary>
		public const int CBM_FIRST = 0x1700;
		/// <summary>Contains combo box status information.</summary>
		/// <remarks>
		/// Describes the state of the combo box drop-down button; presumably returned by combo box
		/// information queries (see CB_GETCOMBOBOXINFO in <see cref="ComboBoxMessage"/>).
		/// NOTE(review): member names suggest these mirror the STATE_SYSTEM_* accessibility
		/// constants from oleacc.h — confirm before relying on that correspondence.
		/// </remarks>
		[PInvokeData("Winuser.h", MSDNShortId = "bb775798")]
		[Flags]
		public enum ComboBoxInfoState
		{
			/// <summary>The button exists and is not pressed.</summary>
			None = 0,
			/// <summary>There is no button.</summary>
			STATE_SYSTEM_INVISIBLE = 0x00008000,
			/// <summary>The button is pressed.</summary>
			STATE_SYSTEM_PRESSED = 0x00000008
		}
		/// <summary>Windows messages for combo-boxes.</summary>
		/// <remarks>
		/// CB_* values are standard combo box messages (Winuser.h); CBEM_* values target the
		/// ComboBoxEx common control. Member-level XML docs are intentionally suppressed below.
		/// </remarks>
		public enum ComboBoxMessage
		{
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
			// --- Standard combo box (CB_*) messages ---
			CB_GETEDITSEL = 0x0140,
			CB_LIMITTEXT = 0x0141,
			CB_SETEDITSEL = 0x0142,
			CB_ADDSTRING = 0x0143,
			CB_DELETESTRING = 0x0144,
			CB_DIR = 0x0145,
			CB_GETCOUNT = 0x0146,
			CB_GETCURSEL = 0x0147,
			CB_GETLBTEXT = 0x0148,
			CB_GETLBTEXTLEN = 0x0149,
			CB_INSERTSTRING = 0x014A,
			CB_RESETCONTENT = 0x014B,
			CB_FINDSTRING = 0x014C,
			CB_SELECTSTRING = 0x014D,
			CB_SETCURSEL = 0x014E,
			CB_SHOWDROPDOWN = 0x014F,
			CB_GETITEMDATA = 0x0150,
			CB_SETITEMDATA = 0x0151,
			CB_GETDROPPEDCONTROLRECT = 0x0152,
			CB_SETITEMHEIGHT = 0x0153,
			CB_GETITEMHEIGHT = 0x0154,
			CB_SETEXTENDEDUI = 0x0155,
			CB_GETEXTENDEDUI = 0x0156,
			CB_GETDROPPEDSTATE = 0x0157,
			CB_FINDSTRINGEXACT = 0x0158,
			CB_SETLOCALE = 0x0159,
			CB_GETLOCALE = 0x015A,
			CB_GETTOPINDEX = 0x015b,
			CB_SETTOPINDEX = 0x015c,
			CB_GETHORIZONTALEXTENT = 0x015d,
			CB_SETHORIZONTALEXTENT = 0x015e,
			CB_GETDROPPEDWIDTH = 0x015f,
			CB_SETDROPPEDWIDTH = 0x0160,
			CB_INITSTORAGE = 0x0161,
			CB_MULTIPLEADDSTRING = 0x0163,
			CB_GETCOMBOBOXINFO = 0x0164,
			// Later additions expressed as offsets from CBM_FIRST.
			CB_SETMINVISIBLE = CBM_FIRST + 1,
			CB_GETMINVISIBLE = CBM_FIRST + 2,
			CB_SETCUEBANNER = CBM_FIRST + 3,
			CB_GETCUEBANNER = CBM_FIRST + 4,
			// --- ComboBoxEx (CBEM_*) messages ---
			CBEM_SETIMAGELIST = WindowMessage.WM_USER + 2,
			CBEM_GETIMAGELIST = WindowMessage.WM_USER + 3,
			CBEM_DELETEITEM = CB_DELETESTRING,
			CBEM_GETCOMBOCONTROL = WindowMessage.WM_USER + 6,
			CBEM_GETEDITCONTROL = WindowMessage.WM_USER + 7,
			CBEM_SETEXSTYLE = WindowMessage.WM_USER + 8, // use SETEXTENDEDSTYLE instead
			CBEM_SETEXTENDEDSTYLE = WindowMessage.WM_USER + 14, // lparam == new style, wParam (optional) == mask
			CBEM_GETEXSTYLE = WindowMessage.WM_USER + 9, // use GETEXTENDEDSTYLE instead
			CBEM_GETEXTENDEDSTYLE = WindowMessage.WM_USER + 9,
			CBEM_SETUNICODEFORMAT = 0x2005,
			CBEM_GETUNICODEFORMAT = 0x2006,
			CBEM_HASEDITCHANGED = WindowMessage.WM_USER + 10,
			CBEM_INSERTITEM = WindowMessage.WM_USER + 11,
			CBEM_SETITEM = WindowMessage.WM_USER + 12,
			CBEM_GETITEM = WindowMessage.WM_USER + 13,
			CBEM_SETWINDOWTHEME = 0x200B,
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
		}
/// <summary>Combo Box Notification Codes</summary>
[PInvokeData("Winuser.h", MSDNShortId = "ff485902")]
public enum ComboBoxNotification
{
/// <summary>
/// Sent when a combo box cannot allocate enough memory to meet a specific request. The parent window of the combo box receives
/// this notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_ERRSPACE = (-1),
/// <summary>
/// Sent when the user changes the current selection in the list box of a combo box. The user can change the selection by
/// clicking in the list box or by using the arrow keys. The parent window of the combo box receives this notification code in
/// the form of a WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_SELCHANGE = 1,
/// <summary>
/// Sent when the user double-clicks a string in the list box of a combo box. The parent window of the combo box receives this
/// notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_DBLCLK = 2,
/// <summary>
/// Sent when a combo box receives the keyboard focus. The parent window of the combo box receives this notification code through
/// the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_SETFOCUS = 3,
/// <summary>
/// Sent when a combo box loses the keyboard focus. The parent window of the combo box receives this notification code through
/// the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_KILLFOCUS = 4,
/// <summary>
/// Sent after the user has taken an action that may have altered the text in the edit control portion of a combo box. Unlike the
/// CBN_EDITUPDATE notification code, this notification code is sent after the system updates the screen. The parent window of
/// the combo box receives this notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_EDITCHANGE = 5,
/// <summary>
/// Sent when the edit control portion of a combo box is about to display altered text. This notification code is sent after the
/// control has formatted the text, but before it displays the text. The parent window of the combo box receives this
/// notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_EDITUPDATE = 6,
/// <summary>
/// Sent when the list box of a combo box is about to be made visible. The parent window of the combo box receives this
/// notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_DROPDOWN = 7,
/// <summary>
/// Sent when the list box of a combo box has been closed. The parent window of the combo box receives this notification code
/// through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_CLOSEUP = 8,
/// <summary>
/// Sent when the user selects a list item, or selects an item and then closes the list. It indicates that the user's selection
/// is to be processed. The parent window of the combo box receives this notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_SELENDOK = 9,
/// <summary>
/// Sent when the user selects an item, but then selects another control or closes the dialog box. It indicates the user's
/// initial selection is to be ignored. The parent window of the combo box receives this notification code through the WM_COMMAND message.
/// <list>
/// <item>
/// <term>wParam</term>
/// <description>The LOWORD contains the control identifier of the combo box. The HIWORD specifies the notification code.</description>
/// </item>
/// <item>
/// <term>lParam</term>
/// <description>Handle to the combo box.</description>
/// </item>
/// </list>
/// </summary>
CBN_SELENDCANCEL = 10,
/// <summary>
/// Sent when a new item has been inserted in the control. This notification code is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <c>NMCOMBOBOXEX</c> structure containing information about the notification code and the item that was inserted.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_INSERTITEM = CBEN_FIRST - 1,
/// <summary>
/// Sent when an item has been deleted. This notification code is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <c>NMCOMBOBOXEX</c> structure that contains information about the notification code and the deleted item.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_DELETEITEM = CBEN_FIRST - 2,
/// <summary>
/// Sent when the user activates the drop-down list or clicks in the control's edit box. This notification code is sent in the
/// form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <see cref="User32.NMHDR"/> structure that contains information about the notification code.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_BEGINEDIT = CBEN_FIRST - 4,
/// <summary>
/// Sent when the user has concluded an operation within the edit box or has selected an item from the control's drop-down list.
/// This notification code is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <c>NMCBEENDEDIT</c> structure that contains information about how the user concluded the edit operation.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_ENDEDITA = CBEN_FIRST - 5,
/// <summary>
/// Sent when the user has concluded an operation within the edit box or has selected an item from the control's drop-down list.
/// This notification code is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <c>NMCBEENDEDIT</c> structure that contains information about how the user concluded the edit operation.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_ENDEDITW = CBEN_FIRST - 6,
/// <summary>
/// Sent to retrieve display information about a callback item. This notification code is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to an <c>NMCOMBOBOXEX</c> structure that contains information about the notification code.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_GETDISPINFO = CBEN_FIRST - 7,
/// <summary>
/// Sent when the user begins dragging the image of the item displayed in the edit portion of the control. This notification code
/// is sent in the form of a WM_NOTIFY message.
/// <list>
/// <item>
/// <term>lParam</term>
/// <description>
/// A pointer to a <c>NMCBEDRAGBEGIN</c> structure that contains information about the notification code.
/// </description>
/// </item>
/// </list>
/// </summary>
CBEN_DRAGBEGIN = CBEN_FIRST - 9,
}
		/// <summary>
		/// To create a combo box using the CreateWindow or CreateWindowEx function, specify the COMBOBOX class, appropriate window style
		/// constants, and a combination of the following combo box styles.
		/// </summary>
		/// <remarks>
		/// Note the mode styles are not independent bits: CBS_DROPDOWNLIST (0x0003) is numerically
		/// CBS_SIMPLE (0x0001) | CBS_DROPDOWN (0x0002), so exactly one mode style should be chosen.
		/// </remarks>
		[PInvokeData("CommCtrl.h", MSDNShortId = "bb775796")]
		public enum ComboBoxStyle
		{
			/// <summary>Displays the list box at all times. The current selection in the list box is displayed in the edit control.</summary>
			CBS_SIMPLE = 0x0001,
			/// <summary>
			/// Similar to CBS_SIMPLE, except that the list box is not displayed unless the user selects an icon next to the edit control.
			/// </summary>
			CBS_DROPDOWN = 0x0002,
			/// <summary>
			/// Similar to CBS_DROPDOWN, except that the edit control is replaced by a static text item that displays the current selection
			/// in the list box.
			/// </summary>
			CBS_DROPDOWNLIST = 0x0003,
			/// <summary>
			/// Specifies that the owner of the list box is responsible for drawing its contents and that the items in the list box are all
			/// the same height. The owner window receives a WM_MEASUREITEM message when the combo box is created and a WM_DRAWITEM message
			/// when a visual aspect of the combo box has changed.
			/// </summary>
			CBS_OWNERDRAWFIXED = 0x0010,
			/// <summary>
			/// Specifies that the owner of the list box is responsible for drawing its contents and that the items in the list box are
			/// variable in height. The owner window receives a WM_MEASUREITEM message for each item in the combo box when you create the
			/// combo box and a WM_DRAWITEM message when a visual aspect of the combo box has changed.
			/// </summary>
			CBS_OWNERDRAWVARIABLE = 0x0020,
			/// <summary>
			/// Automatically scrolls the text in an edit control to the right when the user types a character at the end of the line. If
			/// this style is not set, only text that fits within the rectangular boundary is allowed.
			/// </summary>
			CBS_AUTOHSCROLL = 0x0040,
			/// <summary>
			/// Converts text entered in the combo box edit control from the Windows character set to the OEM character set and then back to
			/// the Windows character set. This ensures proper character conversion when the application calls the CharToOem function to
			/// convert a Windows string in the combo box to OEM characters. This style is most useful for combo boxes that contain file
			/// names and applies only to combo boxes created with the CBS_SIMPLE or CBS_DROPDOWN style.
			/// </summary>
			CBS_OEMCONVERT = 0x0080,
			/// <summary>Automatically sorts strings added to the list box.</summary>
			CBS_SORT = 0x0100,
			/// <summary>
			/// Specifies that an owner-drawn combo box contains items consisting of strings. The combo box maintains the memory and address
			/// for the strings so the application can use the CB_GETLBTEXT message to retrieve the text for a particular item.
			/// </summary>
			CBS_HASSTRINGS = 0x0200,
			/// <summary>
			/// Specifies that the size of the combo box is exactly the size specified by the application when it created the combo box.
			/// Normally, the system sizes a combo box so that it does not display partial items.
			/// </summary>
			CBS_NOINTEGRALHEIGHT = 0x0400,
			/// <summary>
			/// Shows a disabled vertical scroll bar in the list box when the box does not contain enough items to scroll. Without this
			/// style, the scroll bar is hidden when the list box does not contain enough items.
			/// </summary>
			CBS_DISABLENOSCROLL = 0x0800,
			/// <summary>Converts to uppercase all text in both the selection field and the list.</summary>
			CBS_UPPERCASE = 0x2000,
			/// <summary>Converts to lowercase all text in both the selection field and the list.</summary>
			CBS_LOWERCASE = 0x4000,
		}
}
} | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
Each data row contains a name, and value. The row also contains a
type or mimetype. Type corresponds to a .NET class that support
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<metadata name="flowLayoutPanel1.GenerateMember" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>False</value>
</metadata>
<metadata name="flowLayoutPanel1.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="tableLayoutPanel1.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="RegHotkeysAtStartupLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchSystemProxyLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchProxyModeLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchAllowLanLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ShowLogsLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ServerMoveUpLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ServerMoveDownLabel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchSystemProxyTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchProxyModeTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="SwitchAllowLanTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ShowLogsTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ServerMoveUpTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="ServerMoveDownTextBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="RegHotkeysAtStartupCheckBox.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="btnOK.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="btnCancel.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="btnRegisterAll.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
<metadata name="$this.Locked" type="System.Boolean, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089">
<value>True</value>
</metadata>
</root> | {
"pile_set_name": "Github"
} |
template<typename ...Types>
struct tuple { };
void f(tuple<int, float, double>);
// RUN: c-index-test -test-load-source-usrs all -std=c++11 %s | FileCheck %s
// CHECK: usrs-cxx0x.cpp c:@ST>1#pT@tuple Extent=[1:1 - 2:17]
// CHECK: usrs-cxx0x.cpp c:@F@f#$@S@tuple>#p3Ifd# Extent=[4:1 - 4:34]
| {
"pile_set_name": "Github"
} |
<Window x:Class="ClrMDStudio.AnalysisWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:local="clr-namespace:ClrMDStudio"
mc:Ignorable="d"
Title="Analysis"
Width="600" Height="500"
>
<Grid>
<Grid.RowDefinitions>
<RowDefinition Height="*"/>
<RowDefinition Height="Auto"/>
</Grid.RowDefinitions>
<TextBox x:Name="tbResults"
FontFamily="Consolas"
Foreground="LightGray" Background="Black"
AcceptsReturn="True"
MaxLines="1000000"
ScrollViewer.HorizontalScrollBarVisibility="Auto"
ScrollViewer.VerticalScrollBarVisibility="Visible"
/>
<ProgressBar Grid.Row="1" x:Name="progressBar"
Orientation="Horizontal"
Minimum="0" Maximum="100" IsIndeterminate="True"
Height="0" Visibility="Hidden"/>
</Grid>
</Window>
| {
"pile_set_name": "Github"
} |
# Copyright (c) 2016, 2019 Oracle and/or its affiliates. All rights reserved. This
# code is released under a tri EPL/GPL/LGPL license. You can use it,
# redistribute it and/or modify it under the terms of the:
#
# Eclipse Public License version 2.0, or
# GNU General Public License version 2, or
# GNU Lesser General Public License version 2.1.
require_relative '../../ruby/spec_helper'
require_relative 'fixtures/classes'
describe "Truffle::Interop.read" do
describe "with an Array" do
before :each do
@array = [1, 2, 3]
end
it "reads a value of an index that exists" do
Truffle::Interop.read_array_element(@array, 1).should == 2
end
it "reads a method of given name that exists" do
Truffle::Interop.read_member(@array, "[]").should == @array.method(:[])
end
it "raises for an index that doesn't exist" do
-> { Truffle::Interop.read_array_element(@array, 100) }.should raise_error(IndexError)
end
end
describe "with a name that starts with @" do
before :each do
@object = TruffleInteropSpecs::InteropKeysClass.new
end
it "that exists as an instance variable reads it" do
Truffle::Interop.read_member(@object, :@b).should == 2
end
it "that does not exist as an instance variable raises" do
-> { Truffle::Interop.read_member(@object, :@foo) }.should raise_error NameError
end
end
describe "with an object with a method of the same name" do
it "produces a bound Method" do
object = TruffleInteropSpecs::InteropKeysClass.new
Truffle::Interop.read_member(object, :foo).call.should == 14
end
end
describe "with an object with an index method" do
it "calls the index method" do
object = TruffleInteropSpecs::PolyglotArray.new
value = Object.new
Truffle::Interop.write_array_element(object, 2, value)
Truffle::Interop.read_array_element(object, 2).should == value
object.log.should include([:polyglot_read_array_element, 2])
end
end
describe "with both an object with a method of the same name and an index method" do
it "calls the index method" do
object = TruffleInteropSpecs::PolyglotMember.new
Truffle::Interop.write_member(object, :bob, 14)
Truffle::Interop.read_member(object, :bob).should == 14
Truffle::Interop.members(object).should include 'bob'
object.log.should include [:polyglot_read_member, 'bob']
end
end
describe "with an object without a method of the same name or an index method" do
it "raises UnknownIdentifierException" do
object = Object.new
-> {
Truffle::Interop.read_member(object, :foo)
}.should raise_error(NameError, /Unknown identifier: foo/)
end
end
describe "with a Proc" do
it "does not call the proc" do
proc = -> { raise 'called' }
-> { Truffle::Interop.read_member(proc, :key) }.should raise_error NameError
-> { Truffle::Interop.read_member(proc, :@var) }.should raise_error NameError
Truffle::Interop.read_member(proc, 'call').should == proc.method(:call)
end
end
describe "with a Hash class" do
it "does not call the [] method" do
-> { Truffle::Interop.read_member(Hash, :nothing) }.should raise_error NameError
end
end
end
| {
"pile_set_name": "Github"
} |
using Translatable;
namespace pdfforge.PDFCreator.Conversion.Settings.Enums
{
/// <summary>
/// Color models available for the converted output. The
/// [Translation] attributes carry the user-visible, localizable names.
/// </summary>
[Translatable]
public enum ColorModel
{
    [Translation("RGB")]
    Rgb,
    [Translation("CMYK")]
    Cmyk,
    [Translation("Grayscale")]
    Gray
}
} | {
"pile_set_name": "Github"
} |
// Boost.Assign library
//
// Copyright Thorsten Ottosen 2003-2004. Use, modification and
// distribution is subject to the Boost Software License, Version
// 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// For more information, see http://www.boost.org/libs/assign/
//
#ifndef BOOST_ASSIGN_STD_MAP_HPP
#define BOOST_ASSIGN_STD_MAP_HPP
#if defined(_MSC_VER)
# pragma once
#endif
#include <boost/assign/list_inserter.hpp>
#include <boost/config.hpp>
#include <map>
namespace boost
{
namespace assign
{
    // Enables `m += p;` syntax for std::map: forwards to the
    // list_inserter returned by insert(m), which also allows further
    // values to be chained after the first one.
    template< class K, class V, class C, class A, class P >
    inline list_inserter< assign_detail::call_insert< std::map<K,V,C,A> >, P >
    operator+=( std::map<K,V,C,A>& m, const P& p )
    {
        return insert( m )( p );
    }

    // Same operator overload for std::multimap.
    template< class K, class V, class C, class A, class P >
    inline list_inserter< assign_detail::call_insert< std::multimap<K,V,C,A> >, P >
    operator+=( std::multimap<K,V,C,A>& m, const P& p )
    {
        return insert( m )( p );
    }
}
}
#endif
| {
"pile_set_name": "Github"
} |
/*
*
* Copyright (c) 2004 Nex Vision
* Guillaume GOURAT <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/regs-serial.h>
#include <plat/samsung-time.h>
#include "common.h"
#include "otom.h"
static struct map_desc otom11_iodesc[] __initdata = {
/* Device area */
{ (u32)OTOM_VA_CS8900A_BASE, OTOM_PA_CS8900A_BASE, SZ_16M, MT_DEVICE },
};
#define UCON S3C2410_UCON_DEFAULT
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG12 | S3C2410_UFCON_FIFOMODE
static struct s3c2410_uartcfg otom11_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
},
/* port 2 is not actually used */
[2] = {
.hwport = 2,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
}
};
/* NOR Flash on NexVision OTOM board */
static struct resource otom_nor_resource[] = {
[0] = DEFINE_RES_MEM(S3C2410_CS0, SZ_4M),
};
static struct platform_device otom_device_nor = {
.name = "mtd-flash",
.id = -1,
.num_resources = ARRAY_SIZE(otom_nor_resource),
.resource = otom_nor_resource,
};
/* Standard OTOM devices */
static struct platform_device *otom11_devices[] __initdata = {
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
&s3c_device_rtc,
&otom_device_nor,
};
/*
 * otom11_map_io() - early board setup: static I/O mappings (CS8900A
 * ethernet window), clocks, the three UART configs, and the PWM
 * timers used as the system clocksource/clockevent.
 */
static void __init otom11_map_io(void)
{
	s3c24xx_init_io(otom11_iodesc, ARRAY_SIZE(otom11_iodesc));
	s3c24xx_init_clocks(0);
	s3c24xx_init_uarts(otom11_uartcfgs, ARRAY_SIZE(otom11_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
/*
 * otom11_init() - machine init: register i2c0 with default platform
 * data and all on-board platform devices (USB host, LCD, watchdog,
 * i2c, IIS, RTC, NOR flash).
 */
static void __init otom11_init(void)
{
	s3c_i2c0_set_platdata(NULL);
	platform_add_devices(otom11_devices, ARRAY_SIZE(otom11_devices));
}
MACHINE_START(OTOM, "Nex Vision - Otom 1.1")
/* Maintainer: Guillaume GOURAT <[email protected]> */
.atag_offset = 0x100,
.map_io = otom11_map_io,
.init_machine = otom11_init,
.init_irq = s3c2410_init_irq,
.init_time = samsung_timer_init,
.restart = s3c2410_restart,
MACHINE_END
| {
"pile_set_name": "Github"
} |
<?php
/**
* User: zach
* Date: 5/1/13
* Time: 12:16 PM
*
* @category Elasticsearch
* @package Elasticsearch\Common\Exceptions
* @author Zachary Tong <[email protected]>
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache2
* @link http://elasticsearch.org
*/
namespace Elasticsearch\Common\Exceptions;
/**
 * UnexpectedValueException
 *
 * Denotes a value that is outside the normally accepted values.
 * Extends the SPL \UnexpectedValueException so generic SPL handlers
 * still catch it, while implementing ElasticsearchException so callers
 * can catch all client exceptions with a single interface type.
 * The body is intentionally empty: the class only exists as a type marker.
 */
class UnexpectedValueException extends \UnexpectedValueException implements ElasticsearchException
{
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2019 Hugo Amiard [email protected]
// This software is provided 'as-is' under the zlib License, see the LICENSE.txt file.
// This notice and the license may not be removed or altered from any source distribution.
#pragma once
#include <stl/vector.h>
#include <gfx/Forward.h>
#include <gfx/Frustum.h>
namespace two
{
export_ struct refl_ TWO_GFX_EXPORT ClusteredFrustum : public Frustum
{
uint16_t m_subdiv_x = 0;
uint16_t m_subdiv_y = 0;
uint16_t m_subdiv_z = 0;
uint16_t m_cluster_count = 0;
uvec2 m_tile_size = {};
vec2 m_inv_tile_size = {};
float m_linearizer = 0.f;
float m_far_log2 = 0.f;
vec2 m_clip_to_cluster = vec2(0.f);
vector<float> m_distances_z; // max 2.1 MiB (actual: resolution dependant)
vector<vec4> m_planes_x;
vector<vec4> m_planes_y;
vector<vec4> m_bounding_spheres;
uint16_t index(uint ix, uint iy, uint iz) const { return uint16_t(ix + (iy * m_subdiv_x) + (iz * m_subdiv_x * m_subdiv_y)); }
uvec2 tile_index(const vec2& clip) const;
uint slice(float z) const;
Frustum cluster_frustum(uint x, uint y, uint z) const;
void resize(const vec2& clip_size);
void recompute(const mat4& projection, const vec2& clip_size);
};
export_ TWO_GFX_EXPORT void compute_frustum_subdiv(ClusteredFrustum& frustum, vec2 clip_size, size_t slices = 16);
export_ TWO_GFX_EXPORT void compute_frustum_subdiv_square(ClusteredFrustum& frustum, vec2 clip_size, size_t slices = 16, size_t max_clusters = 8192);
}
| {
"pile_set_name": "Github"
} |
1E400:
1E401:
1E402:
1E403:
1E404:
1E405:
1E406:
1E407:
1E408:
1E409:
1E40A:
1E40B:
1E40C:
1E40D:
1E40E:
1E40F:
1E410:
1E411:
1E412:
1E413:
1E414:
1E415:
1E416:
1E417:
1E418:
1E419:
1E41A:
1E41B:
1E41C:
1E41D:
1E41E:
1E41F:
1E420:
1E421:
1E422:
1E423:
1E424:
1E425:
1E426:
1E427:
1E428:
1E429:
1E42A:
1E42B:
1E42C:
1E42D:
1E42E:
1E42F:
1E430:
1E431:
1E432:
1E433:
1E434:
1E435:
1E436:
1E437:
1E438:
1E439:
1E43A:
1E43B:
1E43C:
1E43D:
1E43E:
1E43F:
1E440:
1E441:
1E442:
1E443:
1E444:
1E445:
1E446:
1E447:
1E448:
1E449:
1E44A:
1E44B:
1E44C:
1E44D:
1E44E:
1E44F:
1E450:
1E451:
1E452:
1E453:
1E454:
1E455:
1E456:
1E457:
1E458:
1E459:
1E45A:
1E45B:
1E45C:
1E45D:
1E45E:
1E45F:
1E460:
1E461:
1E462:
1E463:
1E464:
1E465:
1E466:
1E467:
1E468:
1E469:
1E46A:
1E46B:
1E46C:
1E46D:
1E46E:
1E46F:
1E470:
1E471:
1E472:
1E473:
1E474:
1E475:
1E476:
1E477:
1E478:
1E479:
1E47A:
1E47B:
1E47C:
1E47D:
1E47E:
1E47F:
1E480:
1E481:
1E482:
1E483:
1E484:
1E485:
1E486:
1E487:
1E488:
1E489:
1E48A:
1E48B:
1E48C:
1E48D:
1E48E:
1E48F:
1E490:
1E491:
1E492:
1E493:
1E494:
1E495:
1E496:
1E497:
1E498:
1E499:
1E49A:
1E49B:
1E49C:
1E49D:
1E49E:
1E49F:
1E4A0:
1E4A1:
1E4A2:
1E4A3:
1E4A4:
1E4A5:
1E4A6:
1E4A7:
1E4A8:
1E4A9:
1E4AA:
1E4AB:
1E4AC:
1E4AD:
1E4AE:
1E4AF:
1E4B0:
1E4B1:
1E4B2:
1E4B3:
1E4B4:
1E4B5:
1E4B6:
1E4B7:
1E4B8:
1E4B9:
1E4BA:
1E4BB:
1E4BC:
1E4BD:
1E4BE:
1E4BF:
1E4C0:
1E4C1:
1E4C2:
1E4C3:
1E4C4:
1E4C5:
1E4C6:
1E4C7:
1E4C8:
1E4C9:
1E4CA:
1E4CB:
1E4CC:
1E4CD:
1E4CE:
1E4CF:
1E4D0:
1E4D1:
1E4D2:
1E4D3:
1E4D4:
1E4D5:
1E4D6:
1E4D7:
1E4D8:
1E4D9:
1E4DA:
1E4DB:
1E4DC:
1E4DD:
1E4DE:
1E4DF:
1E4E0:
1E4E1:
1E4E2:
1E4E3:
1E4E4:
1E4E5:
1E4E6:
1E4E7:
1E4E8:
1E4E9:
1E4EA:
1E4EB:
1E4EC:
1E4ED:
1E4EE:
1E4EF:
1E4F0:
1E4F1:
1E4F2:
1E4F3:
1E4F4:
1E4F5:
1E4F6:
1E4F7:
1E4F8:
1E4F9:
1E4FA:
1E4FB:
1E4FC:
1E4FD:
1E4FE:
1E4FF:
| {
"pile_set_name": "Github"
} |
/*
Copyright 2014 Workiva, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hilbert
import (
"runtime"
"sync"
h "github.com/Workiva/go-datastructures/numerics/hilbert"
"github.com/Workiva/go-datastructures/rtree"
)
// getCenter returns the midpoint (x, y) of the given rectangle,
// derived from its lower-left and upper-right corners. Integer
// division truncates, matching the original behavior.
func getCenter(rect rtree.Rectangle) (int32, int32) {
	x0, y0 := rect.LowerLeft()
	x1, y1 := rect.UpperRight()
	midX := (x1 + x0) / 2
	midY := (y1 + y0) / 2
	return midX, midY
}
// hilbertBundle pairs a rectangle with the Hilbert-curve value of its
// center point, so rectangles can later be ordered along the curve.
type hilbertBundle struct {
	hilbert hilbert
	rect    rtree.Rectangle
}
// bundlesFromRects computes a hilbertBundle for every rectangle,
// fanning the work out over runtime.NumCPU() goroutines (one per
// contiguous input chunk) and concatenating the per-chunk results in
// chunk order, so the output order matches the input order.
func bundlesFromRects(rects ...rtree.Rectangle) []*hilbertBundle {
	chunks := chunkRectangles(rects, int64(runtime.NumCPU()))
	bundleChunks := make([][]*hilbertBundle, len(chunks))
	var wg sync.WaitGroup
	wg.Add(len(chunks))
	for i := 0; i < runtime.NumCPU(); i++ {
		if len(chunks[i]) == 0 {
			// Empty chunk: store an empty result so the final
			// concatenation loop can treat all slots uniformly.
			bundleChunks[i] = []*hilbertBundle{}
			wg.Done()
			continue
		}
		go func(i int) {
			bundles := make([]*hilbertBundle, 0, len(chunks[i]))
			for _, r := range chunks[i] {
				// Hilbert value of the rectangle's center point.
				// (local h shadows the package alias h after this line)
				h := h.Encode(getCenter(r))
				bundles = append(bundles, &hilbertBundle{hilbert(h), r})
			}
			// Each goroutine writes only its own index, so no extra
			// synchronization is needed around bundleChunks.
			bundleChunks[i] = bundles
			wg.Done()
		}(i)
	}
	wg.Wait()
	bundles := make([]*hilbertBundle, 0, len(rects))
	for _, bc := range bundleChunks {
		bundles = append(bundles, bc...)
	}
	return bundles
}
// chunkRectangles takes a slice of rtree.Rectangle values and chunks it
// into `numParts` contiguous subslices. The i*len/numParts index
// arithmetic spreads any remainder so part sizes differ by at most one,
// and yields empty subslices when len(slice) < numParts.
func chunkRectangles(slice rtree.Rectangles, numParts int64) []rtree.Rectangles {
	parts := make([]rtree.Rectangles, numParts)
	for i := int64(0); i < numParts; i++ {
		parts[i] = slice[i*int64(len(slice))/numParts : (i+1)*int64(len(slice))/numParts]
	}
	return parts
}
| {
"pile_set_name": "Github"
} |
import { render, fireEvent, act, screen } from '@testing-library/react';
import { useState, useEffect } from '@wordpress/element';
import InstallationFeedback from './installation-feedback';
import useFeaturesPolling from './use-features-polling';
import {
INSTALLING_STATUS,
ERROR_STATUS,
INSTALLED_STATUS,
EXTERNAL_STATUS,
} from './feature-status';
// Mock features data.
jest.mock( './use-features-polling', () => jest.fn() );
const featuresOptions = [
{
slug: 'test-installing',
title: 'Test installing',
excerpt: 'Test installing',
status: INSTALLING_STATUS,
},
{
slug: 'test-error',
title: 'Test error',
excerpt: 'Test error',
error: 'Error message',
status: ERROR_STATUS,
},
{
slug: 'test-error-2',
title: 'Test error 2',
excerpt: 'Test error 2',
error: 'Error message',
status: ERROR_STATUS,
},
{
slug: 'test-installed',
title: 'Test installed',
excerpt: 'Test installed',
status: INSTALLED_STATUS,
},
{
slug: 'test-external',
title: 'Test External',
excerpt: 'Test External',
status: EXTERNAL_STATUS,
},
{
slug: 'test-empty',
title: 'Test empty status',
excerpt: 'Test empty status',
},
];
describe( '<InstallationFeedback />', () => {
it( 'Should render with loading status', () => {
const features = {
selected: [ 'test-installing', 'test-error', 'test-installed' ],
options: featuresOptions,
};
useFeaturesPolling.mockImplementation( () => features );
const { queryByText } = render(
<InstallationFeedback
onContinue={ () => {} }
onRetry={ () => {} }
/>
);
expect( queryByText( 'Installing…' ) ).toBeTruthy();
} );
it( 'Should render all success', () => {
const features = {
selected: [ 'test-installed' ],
options: featuresOptions,
};
useFeaturesPolling.mockImplementation( () => features );
const onContinueMock = jest.fn();
const { container, queryByText } = render(
<InstallationFeedback
onContinue={ onContinueMock }
onRetry={ () => {} }
/>
);
expect( container.querySelectorAll( 'button' ).length ).toEqual( 1 );
fireEvent.click( queryByText( 'Continue' ) );
expect( onContinueMock ).toBeCalled();
} );
it( 'Should render errors without loading', () => {
const features = {
selected: [ 'test-error', 'test-installed' ],
options: featuresOptions,
};
useFeaturesPolling.mockImplementation( () => features );
const onContinueMock = jest.fn();
const { queryByText } = render(
<InstallationFeedback
onContinue={ onContinueMock }
onRetry={ () => {} }
/>
);
expect( queryByText( 'Retry' ) ).toBeTruthy();
fireEvent.click( queryByText( 'Continue' ) );
expect( onContinueMock ).toBeCalled();
} );
it( 'Should update external plugin status until installed', async () => {
const onContinueMock = jest.fn();
jest.useFakeTimers();
useFeaturesPolling.mockImplementation( ( active ) => {
const [ pollingCount, setPollingCount ] = useState( 0 );
useEffect( () => {
if ( ! active ) {
return;
}
const timer = setTimeout(
() => setPollingCount( ( n ) => n + 1 ),
2000
);
return () => {
clearTimeout( timer );
};
}, [ active, pollingCount ] );
return 0 === pollingCount
? {
selected: [ 'test-external' ],
options: [
{
slug: 'test-external',
title: 'Test External',
excerpt: 'Test External',
status: EXTERNAL_STATUS,
},
],
}
: {
selected: [ 'test-external' ],
options: [
{
slug: 'test-external',
title: 'Test External',
excerpt: 'Test External',
status: INSTALLED_STATUS,
},
],
};
} );
render(
<InstallationFeedback
onContinue={ onContinueMock }
onRetry={ () => {} }
/>
);
expect( screen.queryByText( 'Continue' ) ).toBeTruthy();
expect( screen.queryByText( 'Plugin installed' ) ).toBeFalsy();
act( jest.runAllTimers );
expect( screen.queryByText( 'Continue' ) ).toBeTruthy();
expect( screen.queryByText( 'Plugin installed' ) ).toBeTruthy();
fireEvent.click( screen.queryByText( 'Continue' ) );
expect( onContinueMock ).toBeCalled();
} );
it( 'Should retry installations with error', () => {
const features = {
selected: [ 'test-error', 'test-error-2', 'test-installed' ],
options: featuresOptions,
};
useFeaturesPolling.mockImplementation( () => features );
const onRetryMock = jest.fn();
const { queryByText, queryAllByText } = render(
<InstallationFeedback
onContinue={ () => {} }
onRetry={ onRetryMock }
/>
);
fireEvent.click( queryByText( 'Retry' ) );
expect( onRetryMock ).toBeCalledWith( [
'test-error',
'test-error-2',
] );
onRetryMock.mockReset();
fireEvent.click( queryAllByText( 'Retry?' )[ 0 ] );
expect( onRetryMock ).toBeCalledWith( [ 'test-error' ] );
} );
} );
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!21 &2100000
Material:
serializedVersion: 6
m_ObjectHideFlags: 0
m_PrefabParentObject: {fileID: 0}
m_PrefabInternal: {fileID: 0}
m_Name: Terrain
m_Shader: {fileID: 4800000, guid: f41b97872ce3bba439516a22765e895b, type: 3}
m_ShaderKeywords: _AA_ON _ALPHATEST_ON _EMISSION _GLOW_ON _MODE_BARY _QUAD_ON _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
_WLIGHT_SURFACE _WSTYLE_DEFAULT _WUV_UV0
m_LightmapFlags: 4
m_EnableInstancingVariants: 0
m_DoubleSidedGI: 0
m_CustomRenderQueue: 2450
stringTagMap:
RenderType: TransparentCutout
disabledShaderPasses: []
m_SavedProperties:
serializedVersion: 3
m_TexEnvs:
- _BumpMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailAlbedoMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailMask:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _DetailNormalMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _EmissionMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _MainTex:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _MetallicGlossMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _OcclusionMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _ParallaxMap:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _WMask:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
- _WTex:
m_Texture: {fileID: 0}
m_Scale: {x: 1, y: 1}
m_Offset: {x: 0, y: 0}
m_Floats:
- _AASmooth: 1.6
- _BumpScale: 1
- _Cull: 2
- _Cutoff: 0.8
- _DetailNormalMapScale: 1
- _DstBlend: 0
- _FDist: 0.010000001
- _FMode: 0
- _FPow: 10
- _Fade: 0
- _Fold: 26
- _GDist: 0.16
- _GEmission: 1.05
- _GPower: 1.25
- _GlossMapScale: 0
- _Glossiness: 0.5
- _GlossyReflections: 1
- _Glow: 1
- _Limits: 0
- _Metallic: 0
- _Mode: 1
- _OcclusionStrength: 1
- _Parallax: 0.02
- _Quad: 1
- _SmoothnessTextureChannel: 1
- _SpecularHighlights: 1
- _SrcBlend: 1
- _TwoSided: 0
- _UVSec: 0
- _WEmission: 3
- _WGloss: 0
- _WInvert: 2
- _WLight: 0
- _WMetal: 0
- _WMode: 3
- _WOpacity: 1
- _WParam: 0
- _WStyle: 0
- _WThickness: 0.06600001
- _WTransparency: 0
- _WUV: 0
- _ZWrite: 1
m_Colors:
- _Color: {r: 1, g: 0.35077757, b: 0, a: 1}
- _EmissionColor: {r: 1, g: 0.35077757, b: 0, a: 1}
- _GColor: {r: 1, g: 0.35077757, b: 0, a: 1}
- _WColor: {r: 1, g: 0.35077757, b: 0, a: 1}
| {
"pile_set_name": "Github"
} |
<?php
namespace GW2Spidy\DB\map;
use \RelationMap;
use \TableMap;
/**
* This class defines the structure of the 'discipline' table.
*
*
*
* This map class is used by Propel to do runtime db structure discovery.
* For example, the createSelectSql() method checks the type of a given column used in an
* ORDER BY clause to know whether it needs to apply SQL to make the ORDER BY case-insensitive
* (i.e. if it's a text column type).
*
* @package propel.generator.gw2spidy.map
*/
class DisciplineTableMap extends TableMap
{
/**
* The (dot-path) name of this class
*/
const CLASS_NAME = 'gw2spidy.map.DisciplineTableMap';
/**
* Initialize the table attributes, columns and validators
* Relations are not initialized by this method since they are lazy loaded
*
* @return void
* @throws PropelException
*/
public function initialize()
{
// attributes
$this->setName('discipline');
$this->setPhpName('Discipline');
$this->setClassname('GW2Spidy\\DB\\Discipline');
$this->setPackage('gw2spidy');
$this->setUseIdGenerator(false);
// columns
$this->addPrimaryKey('ID', 'Id', 'INTEGER', true, null, null);
$this->addColumn('NAME', 'Name', 'VARCHAR', true, 255, null);
// validators
} // initialize()
/**
* Build the RelationMap objects for this table relationships
*/
public function buildRelations()
{
$this->addRelation('Recipe', 'GW2Spidy\\DB\\Recipe', RelationMap::ONE_TO_MANY, array('id' => 'discipline_id', ), null, null, 'Recipes');
} // buildRelations()
} // DisciplineTableMap
| {
"pile_set_name": "Github"
} |
/*
* leds-max8997.c - LED class driver for MAX8997 LEDs.
*
* Copyright (C) 2011 Samsung Electronics
* Donggeun Kim <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#include <linux/platform_device.h>
#define MAX8997_LED_FLASH_SHIFT 3
#define MAX8997_LED_FLASH_CUR_MASK 0xf8
#define MAX8997_LED_MOVIE_SHIFT 4
#define MAX8997_LED_MOVIE_CUR_MASK 0xf0
#define MAX8997_LED_FLASH_MAX_BRIGHTNESS 0x1f
#define MAX8997_LED_MOVIE_MAX_BRIGHTNESS 0xf
#define MAX8997_LED_NONE_MAX_BRIGHTNESS 0
#define MAX8997_LED0_FLASH_MASK 0x1
#define MAX8997_LED0_FLASH_PIN_MASK 0x5
#define MAX8997_LED0_MOVIE_MASK 0x8
#define MAX8997_LED0_MOVIE_PIN_MASK 0x28
#define MAX8997_LED1_FLASH_MASK 0x2
#define MAX8997_LED1_FLASH_PIN_MASK 0x6
#define MAX8997_LED1_MOVIE_MASK 0x10
#define MAX8997_LED1_MOVIE_PIN_MASK 0x30
#define MAX8997_LED_BOOST_ENABLE_MASK (1 << 6)
struct max8997_led {
struct max8997_dev *iodev;
struct led_classdev cdev;
bool enabled;
int id;
enum max8997_led_mode led_mode;
struct mutex mutex;
};
/*
 * max8997_led_set_mode() - select the operating mode for one LED channel.
 * @led:  driver state for this LED
 * @mode: flash/movie mode, optionally pin-controlled, or MAX8997_NONE
 *
 * Updates the LEN_CNTL register bits for this LED (picked by led->id)
 * and adjusts cdev.max_brightness to the register field width of the
 * chosen mode (0x1f for flash, 0xf for movie, 0 when disabled).
 */
static void max8997_led_set_mode(struct max8997_led *led,
				enum max8997_led_mode mode)
{
	int ret;
	struct i2c_client *client = led->iodev->i2c;
	u8 mask = 0, val;

	switch (mode) {
	case MAX8997_FLASH_MODE:
		/* mask clears both LEDs' flash bits; val re-sets only ours */
		mask = MAX8997_LED1_FLASH_MASK | MAX8997_LED0_FLASH_MASK;
		val = led->id ?
			MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
		led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
		break;
	case MAX8997_MOVIE_MODE:
		mask = MAX8997_LED1_MOVIE_MASK | MAX8997_LED0_MOVIE_MASK;
		val = led->id ?
			MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
		led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
		break;
	case MAX8997_FLASH_PIN_CONTROL_MODE:
		mask = MAX8997_LED1_FLASH_PIN_MASK |
			MAX8997_LED0_FLASH_PIN_MASK;
		val = led->id ?
			MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
		led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
		break;
	case MAX8997_MOVIE_PIN_CONTROL_MODE:
		mask = MAX8997_LED1_MOVIE_PIN_MASK |
			MAX8997_LED0_MOVIE_PIN_MASK;
		val = led->id ?
			MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
		led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
		break;
	default:
		/* MAX8997_NONE (or unknown): no register write, LED unusable */
		led->cdev.max_brightness = MAX8997_LED_NONE_MAX_BRIGHTNESS;
		break;
	}

	if (mask) {
		ret = max8997_update_reg(client, MAX8997_REG_LEN_CNTL, val,
				mask);
		if (ret)
			dev_err(led->iodev->dev,
				"failed to update register(%d)\n", ret);
	}

	/* cached even on i2c failure; mode state may then diverge from HW */
	led->led_mode = mode;
}
/*
 * max8997_led_enable() - switch the boost converter that powers the LED.
 * @led:    driver state for this LED
 * @enable: true to enable the boost converter, false to disable it
 *
 * Caches the current state in led->enabled so repeated calls with the
 * same value skip the i2c transaction entirely.
 */
static void max8997_led_enable(struct max8997_led *led, bool enable)
{
	int ret;
	struct i2c_client *client = led->iodev->i2c;
	u8 val = 0, mask = MAX8997_LED_BOOST_ENABLE_MASK;

	if (led->enabled == enable)
		return;

	val = enable ? MAX8997_LED_BOOST_ENABLE_MASK : 0;

	ret = max8997_update_reg(client, MAX8997_REG_BOOST_CNTL, val, mask);
	if (ret)
		dev_err(led->iodev->dev,
			"failed to update register(%d)\n", ret);

	/* NOTE(review): state is cached even if the write failed above */
	led->enabled = enable;
}
static void max8997_led_set_current(struct max8997_led *led,
enum led_brightness value)
{
int ret;
struct i2c_client *client = led->iodev->i2c;
u8 val = 0, mask = 0, reg = 0;
switch (led->led_mode) {
case MAX8997_FLASH_MODE:
case MAX8997_FLASH_PIN_CONTROL_MODE:
val = value << MAX8997_LED_FLASH_SHIFT;
mask = MAX8997_LED_FLASH_CUR_MASK;
reg = led->id ? MAX8997_REG_FLASH2_CUR : MAX8997_REG_FLASH1_CUR;
break;
case MAX8997_MOVIE_MODE:
case MAX8997_MOVIE_PIN_CONTROL_MODE:
val = value << MAX8997_LED_MOVIE_SHIFT;
mask = MAX8997_LED_MOVIE_CUR_MASK;
reg = MAX8997_REG_MOVIE_CUR;
break;
default:
break;
}
if (mask) {
ret = max8997_update_reg(client, reg, val, mask);
if (ret)
dev_err(led->iodev->dev,
"failed to update register(%d)\n", ret);
}
}
/*
 * max8997_led_brightness_set() - led_classdev brightness callback.
 * @led_cdev: class device embedded in struct max8997_led
 * @value:    requested brightness (0 turns the LED off)
 *
 * Programs the current register for the active mode, then toggles the
 * boost converter: any non-zero brightness enables it, zero disables.
 * The original if/else issued the identical set_current() call in both
 * branches, so the duplication is collapsed into straight-line code;
 * the call order (current first, then enable) is preserved.
 */
static void max8997_led_brightness_set(struct led_classdev *led_cdev,
				enum led_brightness value)
{
	struct max8997_led *led =
			container_of(led_cdev, struct max8997_led, cdev);

	max8997_led_set_current(led, value);
	max8997_led_enable(led, value != 0);
}
static ssize_t max8997_led_show_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct max8997_led *led =
container_of(led_cdev, struct max8997_led, cdev);
ssize_t ret = 0;
mutex_lock(&led->mutex);
switch (led->led_mode) {
case MAX8997_FLASH_MODE:
ret += sprintf(buf, "FLASH\n");
break;
case MAX8997_MOVIE_MODE:
ret += sprintf(buf, "MOVIE\n");
break;
case MAX8997_FLASH_PIN_CONTROL_MODE:
ret += sprintf(buf, "FLASH_PIN_CONTROL\n");
break;
case MAX8997_MOVIE_PIN_CONTROL_MODE:
ret += sprintf(buf, "MOVIE_PIN_CONTROL\n");
break;
default:
ret += sprintf(buf, "NONE\n");
break;
}
mutex_unlock(&led->mutex);
return ret;
}
/*
 * max8997_led_store_mode() - sysfs "mode" attribute writer.
 *
 * Parses the written string by prefix. The longer "*_PIN_CONTROL"
 * names are tested first on purpose: "FLASH" is a prefix of
 * "FLASH_PIN_CONTROL", so reversing the order would shadow the
 * pin-control modes. Any unrecognized input falls back to MAX8997_NONE.
 * Always consumes the whole write (returns size).
 */
static ssize_t max8997_led_store_mode(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct max8997_led *led =
			container_of(led_cdev, struct max8997_led, cdev);
	enum max8997_led_mode mode;

	/* mutex guards led_mode against concurrent show/store */
	mutex_lock(&led->mutex);

	if (!strncmp(buf, "FLASH_PIN_CONTROL", 17))
		mode = MAX8997_FLASH_PIN_CONTROL_MODE;
	else if (!strncmp(buf, "MOVIE_PIN_CONTROL", 17))
		mode = MAX8997_MOVIE_PIN_CONTROL_MODE;
	else if (!strncmp(buf, "FLASH", 5))
		mode = MAX8997_FLASH_MODE;
	else if (!strncmp(buf, "MOVIE", 5))
		mode = MAX8997_MOVIE_MODE;
	else
		mode = MAX8997_NONE;

	max8997_led_set_mode(led, mode);

	mutex_unlock(&led->mutex);

	return size;
}
static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
static struct attribute *max8997_attrs[] = {
&dev_attr_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(max8997);
static int max8997_led_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
struct max8997_led *led;
char name[20];
int ret = 0;
if (pdata == NULL) {
dev_err(&pdev->dev, "no platform data\n");
return -ENODEV;
}
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL)
return -ENOMEM;
led->id = pdev->id;
snprintf(name, sizeof(name), "max8997-led%d", pdev->id);
led->cdev.name = name;
led->cdev.brightness_set = max8997_led_brightness_set;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->cdev.brightness = 0;
led->cdev.groups = max8997_groups;
led->iodev = iodev;
/* initialize mode and brightness according to platform_data */
if (pdata->led_pdata) {
u8 mode = 0, brightness = 0;
mode = pdata->led_pdata->mode[led->id];
brightness = pdata->led_pdata->brightness[led->id];
max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
if (brightness > led->cdev.max_brightness)
brightness = led->cdev.max_brightness;
max8997_led_set_current(led, brightness);
led->cdev.brightness = brightness;
} else {
max8997_led_set_mode(led, MAX8997_NONE);
max8997_led_set_current(led, 0);
}
mutex_init(&led->mutex);
ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
return 0;
}
static struct platform_driver max8997_led_driver = {
.driver = {
.name = "max8997-led",
},
.probe = max8997_led_probe,
};
module_platform_driver(max8997_led_driver);
MODULE_AUTHOR("Donggeun Kim <[email protected]>");
MODULE_DESCRIPTION("MAX8997 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max8997-led");
| {
"pile_set_name": "Github"
} |
// ai for the ball and the jack in ep3
#include "CBallJack.h"
#include "engine/core/spritedefines.h"
#define BALL_SPEED 120
#define JACK_SPEED 120
#define JACK_ANIM_RATE 12
#define BALLPUSHAMOUNT 30
char BJ_BlockedD(int o);
unsigned int rnd(void);
// Constructor for both the ball and the jack (distinguished by `type`).
// Both start moving diagonally up-left and are exempt from gravity.
// Only the ball can be shot (canbezapped); the jack ignores player fire.
CBallJack::CBallJack(CMap *pmap, Uint32 x, Uint32 y, object_t type):
CVorticonSpriteObject(pmap, x, y, type)
{
	m_Direction = DUPLEFT;
	animframe = 0;
	animtimer = 0;
	inhibitfall = 1;   // object bounces on its own; disable gravity

	if (m_type==OBJ_BALL)
	{
		speed = BALL_SPEED;
		canbezapped = 1;
	}
	else
	{
		speed = JACK_SPEED;
		canbezapped = 0;
	}
	performCollisions();   // establish initial blocked* flags
}
// Contact handler: only reacts to the player. The ball shoves the
// player back and reverses its own horizontal direction component
// (vertical component is kept); the jack is lethal on touch.
void CBallJack::getTouchedBy(CSpriteObject &theObject)
{
	if(CPlayer *player = dynamic_cast<CPlayer*>(&theObject))
	{
		if (m_type==OBJ_BALL)
		{
			player->push(*this);
			// mirror left<->right, keep up/down
			switch(m_Direction)
			{
			case DUPRIGHT: m_Direction = DUPLEFT; break;
			case DUPLEFT: m_Direction = DUPRIGHT; break;
			case DDOWNRIGHT: m_Direction = DDOWNLEFT; break;
			case DDOWNLEFT: m_Direction = DDOWNRIGHT; break;
			default: break;
			}
		}
		else
		{
			player->kill();
		}
	}
}
// Per-frame update: moves diagonally and reflects off solid tiles.
// A blocked vertical side flips the vertical component; a blocked
// horizontal side flips the horizontal component, so the object
// bounces around the room like a DVD logo. Afterwards the sprite is
// chosen: the ball is static, the jack cycles 4 animation frames at
// JACK_ANIM_RATE.
void CBallJack::process()
{
	switch(m_Direction)
	{
	case DUPLEFT:
		if (blockedu) { m_Direction = DDOWNLEFT; }
		else moveUp(speed);
		if (blockedl) { m_Direction = DUPRIGHT; }
		else moveLeft(speed);
		break;
	case DUPRIGHT:
		if (blockedu) { m_Direction = DDOWNRIGHT; }
		else moveUp(speed);
		if (blockedr) { m_Direction = DUPLEFT; }
		else moveRight(speed);
		break;
	case DDOWNLEFT:
		if (blockedd) { m_Direction = DUPLEFT; }
		else moveDown(speed);
		if (blockedl) { m_Direction = DDOWNRIGHT; }
		else moveLeft(speed);
		break;
	case DDOWNRIGHT:
		if (blockedd) { m_Direction = DUPRIGHT; }
		else moveDown(speed);
		if (blockedr) { m_Direction = DDOWNLEFT; }
		else moveRight(speed);
		break;
	default: break;
	}

	if (m_type==OBJ_BALL)
	{
		mSpriteIdx = OBJ_BALL_DEFSPRITE;
	}
	else
	{
		mSpriteIdx = OBJ_JACK_DEFSPRITE + animframe;
		if (animtimer > JACK_ANIM_RATE)
		{
			animframe++;
			if (animframe>3) animframe=0;
			animtimer = 0;
		}
		else animtimer++;
	}
}
// Shot handler: deflects the ball horizontally away from the shot.
// Only the ball ever receives this (the jack sets canbezapped = 0 in
// the constructor). A shot travelling left pushes the ball leftwards,
// one travelling right pushes it rightwards; the vertical component
// of the motion is unchanged. `obj_type` is part of the base-class
// callback signature and is not used here.
void CBallJack::getShotByRay(object_t &obj_type)
{
	// have ball change direction when zapped
	if (zapd==LEFT)
	{
		switch(m_Direction)
		{
		case DUPRIGHT: m_Direction = DUPLEFT; break;
		case DDOWNRIGHT: m_Direction = DDOWNLEFT; break;
		default : break;
		}
	}
	else
	{
		switch(m_Direction)
		{
		case DUPLEFT: m_Direction = DUPRIGHT; break;
		case DDOWNLEFT: m_Direction = DDOWNRIGHT; break;
		default : break;
		}
	}
}
| {
"pile_set_name": "Github"
} |
// Base Chart.js options shared by every chart type in this module.
export const defaultConfig = {
  // Legend rendered below the chart, with point-style markers.
  legend: {
    position: 'bottom',
    labels: {
      fontColor: '#34495e',
      fontFamily: 'sans-serif',
      fontSize: 14,
      padding: 20,
      usePointStyle: true,
    },
  },
  // Tooltip body typography.
  tooltips: {
    bodyFontSize: 14,
    bodyFontFamily: 'sans-serif',
  },
  // Let the chart resize with its container rather than keep a fixed ratio.
  responsive: true,
  maintainAspectRatio: false,
}
// Maps the public chart-type keys to the corresponding component names.
export const chartTypesMap = {
  pie: 'pie-chart',
  donut: 'donut-chart',
  bubble: 'bubble-chart',
  line: 'line-chart',
  'horizontal-bar': 'horizontal-bar-chart',
  'vertical-bar': 'vertical-bar-chart',
}
| {
"pile_set_name": "Github"
} |
namespace Chat
{
using Microsoft.VisualBasic.CompilerServices;
using System;
public class StresserTest
{
private SlowLorisClass SlowLorisAttack;
public void StartSlowLorisSUB(string URL, int SocketForThread, int TotThread)
{
try
{
this.SlowLorisAttack = new SlowLorisClass();
this.SlowLorisAttack.UrlAttack = URL;
this.SlowLorisAttack.ThreadSocket = SocketForThread;
this.SlowLorisAttack.TotalThreadOpen = TotThread;
this.SlowLorisAttack.StartAttack();
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
public void StartTCPSUB(string URL, int SizePack, int Threads)
{
try
{
if (!TCPStresser.Alive)
{
TCPStresser.URLS = URL;
TCPStresser.Size = SizePack;
TCPStresser.jThre = Threads;
TCPStresser.StartAttack();
}
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
public void StartUDPSub(string URL4, int Packet, int Threads)
{
try
{
if (!UDPClass.IsAlive)
{
UDPClass.UrlAttack = URL4;
UDPClass.PackteSize = Packet;
UDPClass.Threads = Threads;
UDPClass.Start();
}
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
public void StopSlowLorisSub()
{
try
{
this.SlowLorisAttack.StopAllThread();
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
public void StopTCPSUB()
{
try
{
if (TCPStresser.Alive)
{
TCPStresser.StopAttack();
}
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
public void StopUDPSub()
{
try
{
if (UDPClass.IsAlive)
{
UDPClass.CheckStatus();
}
}
catch (Exception exception1)
{
ProjectData.SetProjectError(exception1);
ProjectData.ClearProjectError();
}
}
}
}
| {
"pile_set_name": "Github"
} |
# http://rosettacode.org/wiki/Greatest_element_of_a_list#Raku
# Test harness: runs the Rosetta Code snippet with $*OUT captured and
# compares the captured text against the known-good output.
use v6;
use Test;
plan 1;
my $rosetta-code = {
#### RC-begin
say [max] 17, 13, 50, 56, 28, 63, 62, 66, 74, 54;
say [max] 'my', 'dog', 'has', 'fleas';
sub max2 (*@a) { reduce -> $x, $y { $y after $x ?? $y !! $x }, @a }
say max2 17, 13, 50, 56, 28, 63, 62, 66, 74, 54;
#### RC-end
}
my $output;
{
    # Temporarily replace $*OUT with a shim whose print() appends to
    # $output, so the snippet's `say` calls are captured instead of printed.
    temp $*OUT = class {
        method print(*@args) {
            $output ~= @args.join;
        }
    }
    $rosetta-code.();
}
my $expected = "74
my
74
";
# Normalize CRLF so the comparison is line-ending agnostic.
is($output.subst("\r\n", "\n", :g), $expected.subst("\r\n", "\n", :g), "Greatest element of a list");
# vim: expandtab shiftwidth=4
| {
"pile_set_name": "Github"
} |
Framework One Sample Application - QBall
Raymond Camden 2010
http://www.raymondcamden.com/index.cfm/2010/2/27/Framework-One-Sample-Application--QBall
Updated and included in FW/1 (with permission) by Jeremey Hustman 2014
** Using ColdFusion ORM is not recommended, but it does work. **
This is an example application that uses Framework One (FW/1) and ColdFusion ORM (tested on both Adobe and Railo).
You must first create a datasource 'qBall' in ColdFusion administrator pointing to an empty MySQL database.
If using a database other than MySQL, adjust your dialect settings in Application.cfc 'this.ormsettings' to reflect your environment.
| {
"pile_set_name": "Github"
} |
-- Copyright (C) 2001 Bill Billowitch.
-- Some of the work to develop this test suite was done with Air Force
-- support. The Air Force and Bill Billowitch assume no
-- responsibilities for this software.
-- This file is part of VESTs (Vhdl tESTs).
-- VESTs is free software; you can redistribute it and/or modify it
-- under the terms of the GNU General Public License as published by the
-- Free Software Foundation; either version 2 of the License, or (at
-- your option) any later version.
-- VESTs is distributed in the hope that it will be useful, but WITHOUT
-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-- for more details.
-- You should have received a copy of the GNU General Public License
-- along with VESTs; if not, write to the Free Software Foundation,
-- Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-- ---------------------------------------------------------------------
--
-- $Id: tc813.vhd,v 1.2 2001-10-26 16:30:27 paw Exp $
-- $Revision: 1.2 $
--
-- ---------------------------------------------------------------------
-- Architecture body for the VESTs compliance check. The entity it is
-- bound to is declared elsewhere; this body only reports the failure
-- message for the rule under test and then suspends forever.
ARCHITECTURE c01s02b00x00p04n02i00813arch OF c01s02b00x00p04n02i00813ent IS

BEGIN
  TESTING: PROCESS
  BEGIN
    -- assert FALSE always fires, emitting the test's failure banner.
    assert FALSE
      report "***FAILED TEST: c01s02b00x00p04n02i00813 - Entity declaration and architecture body must reside in the same library."
      severity ERROR;
    -- Suspend the process permanently after reporting once.
    wait;
  END PROCESS TESTING;

END c01s02b00x00p04n02i00813arch;
| {
"pile_set_name": "Github"
} |
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/student/W1D3_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"# Neuromatch Academy: Week 1, Day 3, Tutorial 2\n",
"# Model Fitting: Linear regression with MLE\n",
"\n",
"**Content creators**: Pierre-Étienne Fiquet, Anqi Wu, Alex Hyafil with help from Byron Galbraith\n",
"\n",
"**Content reviewers**: Lina Teichmann, Madineh Sarvestani, Patrick Mineault, Ella Batty, Michael Waskom\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"---\n",
"#Tutorial Objectives\n",
"\n",
"This is Tutorial 2 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).\n",
"\n",
"In this tutorial, we will use a different approach to fit linear models that incorporates the random 'noise' in our data.\n",
"- Learn about probability distributions and probabilistic models\n",
"- Learn how to calculate the likelihood of our model parameters\n",
"- Learn how to implement the maximum likelihood estimator, to find the model parameter with the maximum likelihood\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"---\n",
"# Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from scipy import stats"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"#@title Figure Settings\n",
"import ipywidgets as widgets # interactive display\n",
"%config InlineBackend.figure_format = 'retina'\n",
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"#@title Helper Functions\n",
"def plot_density_image(x, y, theta, sigma=1, ax=None):\n",
" \"\"\" Plots probability distribution of y given x, theta, and sigma\n",
"\n",
" Args:\n",
"\n",
" x (ndarray): An array of shape (samples,) that contains the input values.\n",
" y (ndarray): An array of shape (samples,) that contains the corresponding\n",
" measurement values to the inputs.\n",
" theta (float): Slope parameter\n",
" sigma (float): standard deviation of Gaussian noise\n",
"\n",
" \"\"\"\n",
"\n",
" # plot the probability density of p(y|x,theta)\n",
" if ax is None:\n",
" fig, ax = plt.subplots()\n",
"\n",
" xmin, xmax = np.floor(np.min(x)), np.ceil(np.max(x))\n",
" ymin, ymax = np.floor(np.min(y)), np.ceil(np.max(y))\n",
" xx = np.linspace(xmin, xmax, 50)\n",
" yy = np.linspace(ymin, ymax, 50)\n",
"\n",
" surface = np.zeros((len(yy), len(xx)))\n",
" for i, x_i in enumerate(xx):\n",
" surface[:, i] = stats.norm(theta * x_i, sigma).pdf(yy)\n",
"\n",
" ax.set(xlabel='x', ylabel='y')\n",
"\n",
" return ax.imshow(surface, origin='lower', aspect='auto', vmin=0, vmax=None,\n",
" cmap=plt.get_cmap('Wistia'),\n",
" extent=[xmin, xmax, ymin, ymax])\n",
"\n",
"\n",
"\n",
"def solve_normal_eqn(x, y):\n",
" \"\"\"Solve the normal equations to produce the value of theta_hat that minimizes\n",
" MSE.\n",
"\n",
" Args:\n",
" x (ndarray): An array of shape (samples,) that contains the input values.\n",
" y (ndarray): An array of shape (samples,) that contains the corresponding\n",
" measurement values to the inputs.\n",
" theta_hat (float): An estimate of the slope parameter.\n",
"\n",
" Returns:\n",
" float: The mean squared error of the data with the estimated parameter.\n",
" \"\"\"\n",
" theta_hat = (x.T @ y) / (x.T @ x)\n",
" return theta_hat"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"---\n",
"# Section 1: Maximum Likelihood Estimation (MLE)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 517
},
"colab_type": "code",
"outputId": "68f35348-6538-4524-dda4-e150e08f5588"
},
"outputs": [],
"source": [
"#@title Video 1: Maximum Likelihood Estimation\n",
"from IPython.display import YouTubeVideo\n",
"video = YouTubeVideo(id=\"8mpNmzLKNfU\", width=854, height=480, fs=1)\n",
"print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
"video"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"In the previous tutorial we made the assumption that the data was drawn from a linear relationship with noise added, and found an effective approach for estimating model parameters based on minimizing the mean squared error.\n",
"\n",
"In that case we treated the noise as simply a nuisance, but what if we factored it directly into our model?\n",
"\n",
"Recall our linear model:\n",
"\n",
"\\begin{align}\n",
"y = \\theta x + \\epsilon.\n",
"\\end{align}\n",
"\n",
"The noise component $\\epsilon$ is often modeled as a random variable drawn from a Gaussian distribution (also called the normal distribution).\n",
"\n",
"The Gaussian distribution is described by its [probability density function](https://en.wikipedia.org/wiki/Probability_density_function) (pdf)\n",
"\\begin{align}\n",
"\\mathcal{N}(x; \\mu, \\sigma^2) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-\\frac{1}{2\\sigma^2}(x-\\mu)^2}\n",
"\\end{align}\n",
"\n",
"and is dependent on two parameters: the mean $\\mu$ and the variance $\\sigma^2$. We often consider the noise signal to be Gaussian \"white noise\", with zero mean and unit variance:\n",
"\n",
"\\begin{align}\n",
"\\epsilon \\sim \\mathcal{N}(0, 1).\n",
"\\end{align}\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Interactive Demo: Gaussian Distribution Explorer\n",
"\n",
"Use the explorer widget below to see how varying the $\\mu$ and $\\sigma$ parameters change the location and shape of the samples."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 494,
"referenced_widgets": [
"bfd70882f10945b096fe70d9ef1486cd",
"2d7a17359c5d430abbc21d3e20d2e8a5",
"4fac186a03454c3f95c25da40f71e19c",
"5df5db2be4f049819d26d13ead27e41c",
"d1d82ff4a0274afeb6afbd311ba2f526",
"d0e0cb67d67c4550a0bcbabdf6c45455",
"d99af8c857254508a1d4359f445f4f51",
"470d2916aeb4493aa17cf5b467ebc90f",
"4ca59a8b511b400d837f46949611c867",
"791de5eb8c024e88bad22199a94c9be3"
]
},
"colab_type": "code",
"outputId": "c6e875c3-a219-40f0-fdb4-51df8af1fa0c"
},
"outputs": [],
"source": [
"#@title\n",
"\n",
"#@markdown Make sure you execute this cell to enable the widget!\n",
"\n",
"@widgets.interact(mu=widgets.FloatSlider(0.0, min=-2.0, max=2.0),\n",
" sigma=widgets.FloatSlider(1.0, min=0.5, max=2.0))\n",
"def plot_normal_dist(mu=0, sigma=1):\n",
"\n",
" # Generate pdf & samples from normal distribution with mu/sigma\n",
" rv = stats.norm(mu, sigma)\n",
" x = np.linspace(-5, 5, 100)\n",
" y = rv.pdf(x)\n",
" samples = rv.rvs(1000)\n",
"\n",
" # Plot\n",
" fig, ax = plt.subplots()\n",
" ax.hist(samples, 20, density=True, color='g', histtype='stepfilled', alpha=0.8,\n",
" label='histogram')\n",
" ax.plot(x, y, color='orange', linewidth=3, label='pdf')\n",
" ax.vlines(mu, 0, rv.pdf(mu), color='y', linewidth=3, label='$\\mu$')\n",
" ax.vlines([mu-sigma, mu+sigma], 0, rv.pdf([mu-sigma, mu+sigma]), colors='red',\n",
" color='b', linewidth=3, label='$\\sigma$')\n",
" ax.set(xlabel='x', ylabel='probability density', xlim=[-5, 5], ylim=[0, 1.0])\n",
" ax.legend()"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"\n",
"## Section 1.1: Probabilistic Models\n",
"\n",
"Now that we have a model of our noise component $\\epsilon$ as random variable, how do we incorporate this back into our original linear model from before? Consider again our simplified model $y = \\theta x + \\epsilon$ where the noise has zero mean and unit variance $\\epsilon \\sim \\mathcal{N}(0, 1)$. We can now also treat $y$ as a random variable drawn from a Gaussian distribution where $\\mu = \\theta x$ and $\\sigma^2 = 1$:\n",
"\n",
"\\begin{align}\n",
"y \\sim \\mathcal{N}(\\theta x, 1)\n",
"\\end{align}\n",
"\n",
"which is to say that the probability of observing $y$ given $x$ and parameter $\\theta$ is\n",
"\\begin{align}\n",
"p(y|x,\\theta) = \\frac{1}{\\sqrt{2\\pi}}e^{-\\frac{1}{2}(y-\\theta x)^2}\n",
"\\end{align}\n",
"\n",
"---\n",
"\n",
"Let's revisit our original sample dataset where the true underlying model has $\\theta = 1.2$."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"# @title\n",
"\n",
"# @markdown Execute this cell to generate some simulated data\n",
"\n",
"# setting a fixed seed to our random number generator ensures we will always\n",
"# get the same psuedorandom number sequence\n",
"\n",
"np.random.seed(121)\n",
"theta = 1.2\n",
"n_samples = 30\n",
"x = 10 * np.random.rand(n_samples) # sample from a uniform distribution over [0,10)\n",
"noise = np.random.randn(n_samples) # sample from a standard normal distribution\n",
"y = theta * x + noise"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"This time we can plot the density of $p(y|x,\\theta=1.2)$ and see how $p(y)$ changes for different values of $x$."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 285
},
"colab_type": "code",
"outputId": "edaf91a5-9223-4c93-8d53-63bd34761c32"
},
"outputs": [],
"source": [
"#@title\n",
"#@markdown Execute this cell to visualize p(y|x, theta=1.2)\n",
"\n",
"fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 4))\n",
"\n",
"# Invokes helper function to generate density image plots from data and parameters\n",
"im = plot_density_image(x, y, 1.2, ax=ax1)\n",
"plt.colorbar(im, ax=ax1)\n",
"ax1.axvline(8, color='k')\n",
"ax1.set(title=r'p(y | x, $\\theta$=1.2)')\n",
"\n",
"# Plot pdf for given x\n",
"ylim = ax1.get_ylim()\n",
"yy = np.linspace(ylim[0], ylim[1], 50)\n",
"ax2.plot(yy, stats.norm(theta * 8, 1).pdf(yy), color='orange', linewidth=2)\n",
"ax2.set(\n",
" title=r'p(y|x=8, $\\theta$=1.2)',\n",
" xlabel='y',\n",
" ylabel='probability density');"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Section 1.2: Likelihood Estimation\n",
"\n",
"Now that we have our probabilistic model, we turn back to our original challenge of finding a good estimate for $\\theta$ that fits our data. Given the inherent uncertainty when dealing in probabilities, we talk about the [likelihood](https://en.wikipedia.org/wiki/Likelihood_function) that some estimate $\\hat \\theta$ fits our data. The likelihood function $\\mathcal{L(\\theta)}$ is equal to the probability density function parameterized by that $\\theta$:\n",
"\n",
"\\begin{align}\n",
"\\mathcal{L}(\\theta|x,y) = p(y|x,\\theta) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-\\frac{1}{2\\sigma^2}(y-\\theta x)^2}\n",
"\\end{align}"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Exercise 1: Likelihood Function\n",
"\n",
"In this exercise you will implement the likelihood function $\\mathcal{L}(\\theta|x,y)$ for our linear model where $\\sigma = 1$.\n",
"\n",
"After implementing this function, we can produce probabilities that our estimate $\\hat{\\theta}$ generated the provided observations. We will try with one of the samples from our dataset.\n",
"\n",
"TIP: Use `np.exp` and `np.sqrt` for the exponential and square root functions, respectively."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"def likelihood(theta_hat, x, y):\n",
" \"\"\"The likelihood function for a linear model with noise sampled from a\n",
" Gaussian distribution with zero mean and unit variance.\n",
"\n",
" Args:\n",
" theta_hat (float): An estimate of the slope parameter.\n",
" x (ndarray): An array of shape (samples,) that contains the input values.\n",
" y (ndarray): An array of shape (samples,) that contains the corresponding\n",
" measurement values to the inputs.\n",
"\n",
" Returns:\n",
" ndarray: the likelihood values for the theta_hat estimate\n",
" \"\"\"\n",
" sigma = 1\n",
" ##############################################################################\n",
" ## TODO for students: implement the likelihood function\n",
" # Fill out function and remove\n",
" raise NotImplementedError(\"Student exercise: implement the likelihood function\")\n",
" ##############################################################################\n",
"\n",
" # Compute Gaussian likelihood\n",
" pdf = ...\n",
"\n",
" return pdf\n",
"\n",
"\n",
"# Uncomment below to test your function\n",
"# print(likelihood(1.0, x[1], y[1]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"cellView": "both",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "text",
"outputId": "6f1e6ea1-7a95-4b7f-902d-b7ee858b0d76"
},
"source": [
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D3_ModelFitting/solutions/W1D3_Tutorial2_Solution_328a0c30.py)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"We should see that $\\mathcal{L}(\\theta=1.0|x=2.1,y=3.7) \\approx 0.11$. So far so good, but how does this tell us how this estimate is better than any others?\n",
"\n",
"When dealing with a set of data points, as we are with our dataset, we are concerned with their joint probability -- the likelihood that all data points are explained by our parameterization. Since we have assumed that the noise affects each output independently, we can factorize the likelihood, and write:\n",
"\n",
"\\begin{align}\n",
"\\mathcal{L}(\\theta|X,Y) = \\prod_{i=1}^N \\mathcal{L}(\\theta|x_i,y_i),\n",
"\\end{align}\n",
"\n",
"where we have $N$ data points $X = \\{x_1,...,x_N\\}$ and $Y = \\{y_1,...,y_N\\}$.\n",
"\n",
"In practice, such a product can be numerically unstable. Indeed multiplying small values together can lead to [underflow](https://en.wikipedia.org/wiki/Arithmetic_underflow), the situation in which the digital representation of floating point number reaches its limit. This problem can be circumvented by taking the logarithm of the likelihood because the logarithm transforms products into sums:\n",
"\n",
"\\begin{align}\n",
"\\operatorname{log}\\mathcal{L}(\\theta|X,Y) = \\sum_{i=1}^N \\operatorname{log}\\mathcal{L}(\\theta|x_i,y_i)\n",
"\\end{align}\n",
"\n",
"We can take the sum of the log of the output of our `likelihood` method applied to the full dataset to get a better idea of how different $\\hat\\theta$ compare. We can also plot the different distribution densities over our dataset and see how they line up qualitatively."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 285
},
"colab_type": "code",
"outputId": "2e6d6de4-c5a1-4371-a08b-a2df4eb27735"
},
"outputs": [],
"source": [
"#@title\n",
"#@markdown Execute this cell to visualize different distribution densities\n",
"theta_hats = [0.5, 1.0, 2.2]\n",
"fig, axes = plt.subplots(ncols=3, figsize=(16, 4))\n",
"for theta_hat, ax in zip(theta_hats, axes):\n",
" ll = np.sum(np.log(likelihood(theta_hat, x, y))) # log likelihood\n",
" im = plot_density_image(x, y, theta_hat, ax=ax)\n",
" ax.scatter(x, y)\n",
" ax.set(title=fr'$\\hat{{\\theta}}$ = {theta_hat}, log likelihood: {ll:.2f}')\n",
"plt.colorbar(im, ax=ax);"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"Using the log likelihood calculation, we see that $\\mathcal{L}(\\theta=1.0) > \\mathcal{L}(\\theta=0.5) > \\mathcal{L}(\\theta=2.2)$.\n",
"\n",
"This is great: now we have a way to compare estimators based on likelihood. But like with the MSE approach, we want an analytic solution to find the best estimator. In this case, we want to find the estimator that maximizes the likelihood.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"## Section 1.3: Finding the Maximum Likelihood Estimator\n",
"\n",
"We want to find the parameter value $\\hat\\theta$ that makes our data set most likely:\n",
"\n",
"\\begin{align}\n",
"\\hat{\\theta}_{\\textrm{MLE}} = \\underset{\\theta}{\\operatorname{argmax}} \\mathcal{L}(\\theta|X,Y) \n",
"\\end{align}\n",
"\n",
"We discussed how taking the logarithm of the likelihood helps with numerical stability, the good thing is that it does so without changing the parameter value that maximizes the likelihood. Indeed, the $\\textrm{log}()$ function is *monotonically increasing*, which means that it preserves the order of its inputs. So we have:\n",
"\n",
"\\begin{align}\n",
"\\hat{\\theta}_{\\textrm{MLE}} = \\underset{\\theta}{\\operatorname{argmax}} \\sum_{i=1}^m \\textrm{log} \\mathcal{L}(\\theta|x_i,y_i) \n",
"\\end{align}\n",
"\n",
"Now substituting our specific likelihood function and taking its logarithm, we get:\n",
"\\begin{align}\n",
"\\hat{\\theta}_{\\textrm{MLE}} = \\underset{\\theta}{\\operatorname{argmax}} [-\\frac{N}{2} \\operatorname{log} 2\\pi\\sigma^2 - \\frac{1}{2\\sigma^2}\\sum_{i=1}^N (y_i-\\theta x_i)^2].\n",
"\\end{align}\n",
"\n",
"Note that maximizing the log likelihood is the same as minimizing the negative log likelihood (in practice optimization routines are developed to solve minimization not maximization problems). Because of the convexity of this objective function, we can take the derivative of our negative log likelihhood, set it to 0, and solve - just like our solution to minimizing MSE.\n",
"\n",
"\\begin{align}\n",
"\\frac{\\partial\\operatorname{log}\\mathcal{L}(\\theta|x,y)}{\\partial\\theta}=\\frac{1}{\\sigma^2}\\sum_{i=1}^N(y_i-\\theta x_i)x_i = 0\n",
"\\end{align}\n",
"\n",
"This looks remarkably like the equation we had to solve for the optimal MSE estimator, and, in fact, we arrive to the exact same solution!\n",
"\n",
"\\begin{align}\n",
"\\hat{\\theta}_{\\textrm{MLE}} = \\hat{\\theta}_{\\textrm{MSE}} = \\frac{\\sum_{i=1}^N x_i y_i}{\\sum_{i=1}^N x_i^2}\n",
"\\end{align}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"# Compute theta_hat_MLE\n",
"theta_hat_mle = (x @ y) / (x @ x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 430
},
"colab_type": "code",
"outputId": "e2c78f3f-63e2-4f78-a355-7d20db18e9c5"
},
"outputs": [],
"source": [
"#@title\n",
"#@markdown Execute this cell to visualize density with theta_hat_mle\n",
"\n",
"# Plot the resulting distribution density\n",
"fig, ax = plt.subplots()\n",
"ll = np.sum(np.log(likelihood(theta_hat_mle, x, y))) # log likelihood\n",
"im = plot_density_image(x, y, theta_hat_mle, ax=ax)\n",
"plt.colorbar(im, ax=ax);\n",
"ax.scatter(x, y)\n",
"ax.set(title=fr'$\\hat{{\\theta}}$ = {theta_hat_mle:.2f}, log likelihood: {ll:.2f}');"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"---\n",
"# Summary\n",
"\n",
"- Likelihood vs probability\n",
" - $\\mathcal{L}(\\theta|x, y) = p(y|\\theta, x)$\n",
" - $p(y|\\theta, x)$ -> \"probability of observing the response $y$ given parameter $\\theta$ and input $x$\"\n",
" - $\\mathcal{L}(\\theta|x, y)$ -> \"likelihood model that parameters $\\theta$ produced response $y$ from input $x$\"\n",
"- Log-likelihood maximization\n",
" - We take the $\\textrm{log}$ of the likelihood function for computational convenience\n",
" - The parameters $\\theta$ that maximize $\\textrm{log}\\mathcal{L}(\\theta|x, y)$ are the model parameters that maximize the probability of observing the data.\n",
"- **Key point**:\n",
" - the log-likelihood is a flexible cost function, and is often used to find model parameters that best fit the data."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"---\n",
"# Appendix"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"We can also see $\\mathrm{p}(\\mathrm{y} | \\mathrm{x}, \\theta)$ as a function of $x$. This is the stimulus likelihood function, and it is useful in case we want to decode the input $x$ from observed responses $y$. This is what is relevant from the point of view of a neuron that does not have access to the outside world and tries to infer what's out there from the responses of other neurons!\n",
"\n",
"\n",
"\n"
]
}
],
"metadata": {
"celltoolbar": "Slideshow",
"colab": {
"collapsed_sections": [],
"include_colab_link": true,
"name": "W1D3_Tutorial2",
"provenance": [],
"toc_visible": true
},
"kernel": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.8"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
| {
"pile_set_name": "Github"
} |
//
// PhotosDataSourceFactory.swift
// Unsplash
//
// Created by Olivier Collet on 2017-10-10.
// Copyright © 2017 Unsplash. All rights reserved.
//
import UIKit
/// Builds paged data sources for the two photo-listing modes:
/// a text search, or the contents of a specific collection.
/// Both modes page 30 photos at a time.
enum PhotosDataSourceFactory: PagedDataSourceFactory {
    case search(query: String)
    case collection(identifier: String)

    /// A fresh paged data source driven by this factory.
    var dataSource: PagedDataSource {
        return PagedDataSource(with: self)
    }

    /// Cursor for the first page (page 1, 30 items per page) of the
    /// request type matching this case.
    func initialCursor() -> UnsplashPagedRequest.Cursor {
        switch self {
        case .search(let query):
            return SearchPhotosRequest.cursor(with: query, page: 1, perPage: 30)
        case .collection(let identifier):
            let perPage = 30
            return GetCollectionPhotosRequest.cursor(with: identifier, page: 1, perPage: perPage)
        }
    }

    /// Concrete request for the page described by `cursor`, using the
    /// query or collection identifier captured in this case.
    func request(with cursor: UnsplashPagedRequest.Cursor) -> UnsplashPagedRequest {
        switch self {
        case .search(let query):
            return SearchPhotosRequest(with: query, page: cursor.page, perPage: cursor.perPage)
        case .collection(let identifier):
            return GetCollectionPhotosRequest(for: identifier, page: cursor.page, perPage: cursor.perPage)
        }
    }
}
| {
"pile_set_name": "Github"
} |
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"___\n",
"\n",
"<a href='https://www.udemy.com/user/joseportilla/'><img src='../Pierian_Data_Logo.png'/></a>\n",
"___\n",
"<center><em>Content Copyright by Pierian Data</em></center>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# zip\n",
"\n",
"zip() makes an iterator that aggregates elements from each of the iterables.\n",
"\n",
"Returns an iterator of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables. The iterator stops when the shortest input iterable is exhausted. With a single iterable argument, it returns an iterator of 1-tuples. With no arguments, it returns an empty iterator. \n",
"\n",
"zip() is equivalent to:\n",
"\n",
" def zip(*iterables):\n",
" # zip('ABCD', 'xy') --> Ax By\n",
" sentinel = object()\n",
" iterators = [iter(it) for it in iterables]\n",
" while iterators:\n",
" result = []\n",
" for it in iterators:\n",
" elem = next(it, sentinel)\n",
" if elem is sentinel:\n",
" return\n",
" result.append(elem)\n",
" yield tuple(result)\n",
" \n",
"\n",
"zip() should only be used with unequal length inputs when you don’t care about trailing, unmatched values from the longer iterables. \n",
"\n",
"Let's see it in action in some examples:\n",
"\n",
"## Examples"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[(1, 4), (2, 5), (3, 6)]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x = [1,2,3]\n",
"y = [4,5,6]\n",
"\n",
"# Zip the lists together\n",
"list(zip(x,y))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note how tuples are returned. What if one iterable is longer than the other?"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[(1, 4), (2, 5), (3, 6)]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x = [1,2,3]\n",
"y = [4,5,6,7,8]\n",
"\n",
"# Zip the lists together\n",
"list(zip(x,y))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "Note how the zip is defined by the shortest iterable length. It's generally advised not to zip unequal-length iterables unless you're very sure you only need partial tuple pairings.\n",
"\n",
"What happens if we try to zip together dictionaries?"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('a', 'c'), ('b', 'd')]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"d1 = {'a':1,'b':2}\n",
"d2 = {'c':4,'d':5}\n",
"\n",
"list(zip(d1,d2))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This makes sense because simply iterating through the dictionaries will result in just the keys. We would have to call methods to mix keys and values:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('c', 1), ('d', 2)]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(zip(d2,d1.values()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "Great! Finally let's use zip() to switch the keys and values of the two dictionaries:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def switcharoo(d1,d2):\n",
" dout = {}\n",
" \n",
" for d1key,d2val in zip(d1,d2.values()):\n",
" dout[d1key] = d2val\n",
" \n",
" return dout"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'a': 4, 'b': 5}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"switcharoo(d1,d2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Great! You can use zip to save a lot of typing in many situations! You should now have a good understanding of zip() and some possible use cases."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
| {
"pile_set_name": "Github"
} |
### Let us know how we’re doing!
Please take a moment to fill out the [Microsoft Cloud Workshop Survey](https://forms.office.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbRyEtIpX7sDdChuWsXhzKJXJUNjFBVkROWDhSSVdYT0dSRkY4UVFCVzZBVy4u) and help us improve our offerings.
# Cloud-native applications
Fabrikam Medical Conferences provides conference web site services, tailored to the medical community. Their business has grown and the management of many instances of the code base and change cycle per tenant has gotten out of control.
The goal of this workshop is to help them build a proof of concept (POC) that will migrate their code to a more manageable process that involves containerization of tenant code, a better DevOps workflow, and a simple lift-and-shift story for their database backend.
August 2020
## Target Audience
- Application developer
- Infrastructure architect
## Abstracts
### Workshop
In this workshop, you will build a proof of concept (POC) that will transform an existing on-premises application to a container-based application. This POC will deliver a multi-tenant web app hosting solution leveraging Azure Kubernetes Service (AKS), Docker containers on Linux nodes, and a migration from MongoDB to CosmosDB.
At the end of this workshop, you will be better able to improve the reliability of and increase the release cadence of your container-based applications through time-tested DevOps practices.
### Whiteboard Design Session
In this whiteboard design session, you will learn about the choices related to building and deploying containerized applications in Azure, critical decisions around this, and other aspects of the solution, including ways to lift-and-shift parts of the application to reduce applications changes.
By the end of this design session, you will be better able to design solutions that target Azure Kubernetes Service (AKS) and define a DevOps workflow for containerized applications.
### Hands-on Lab
This hands-on lab is designed to guide you through the process of building and deploying Docker images to the Kubernetes platform hosted on Azure Kubernetes Services (AKS), in addition to learning how to work with dynamic service discovery, service scale-out, and high-availability.
At the end of this lab, you will be better able to build and deploy containerized applications to Azure Kubernetes Service and perform common DevOps procedures.
## Azure services and related products
- Azure Kubernetes Service (AKS)
- Azure Container Registry
- GitHub
- Docker
- Cosmos DB (including MongoDB API)
## Azure solutions
App Modernization
## Related references
- [MCW](https://github.com/Microsoft/MCW)
## Help & Support
We welcome feedback and comments from Microsoft SMEs & learning partners who deliver MCWs.
***Having trouble?***
- First, verify you have followed all written lab instructions (including the Before the Hands-on lab document).
- Next, submit an issue with a detailed description of the problem.
- Do not submit pull requests. Our content authors will make all changes and submit pull requests for approval.
If you are planning to present a workshop, *review and test the materials early*! We recommend at least two weeks prior.
### Please allow 5 - 10 business days for review and resolution of issues.
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "iphone",
"size" : "29x29",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "iphone",
"size" : "57x57",
"scale" : "1x"
},
{
"idiom" : "iphone",
"size" : "57x57",
"scale" : "2x"
},
{
"size" : "60x60",
"idiom" : "iphone",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "Icon_29x29.png",
"scale" : "1x"
},
{
"size" : "29x29",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "Icon_40x40.png",
"scale" : "1x"
},
{
"size" : "40x40",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "ipad",
"size" : "50x50",
"scale" : "1x"
},
{
"idiom" : "ipad",
"size" : "50x50",
"scale" : "2x"
},
{
"idiom" : "ipad",
"size" : "72x72",
"scale" : "1x"
},
{
"idiom" : "ipad",
"size" : "72x72",
"scale" : "2x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "Icon_76x76.png",
"scale" : "1x"
},
{
"size" : "76x76",
"idiom" : "ipad",
"filename" : "[email protected]",
"scale" : "2x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
/.*/Attack(
(search_method): GreedySearch
(goal_function): NonOverlappingOutput
(transformation): WordSwapRandomCharacterSubstitution
(constraints):
(0): LevenshteinEditDistance(
(max_edit_distance): 12
)
(1): MaxWordsPerturbed(
(max_percent): 0.75
)
(2): RepeatModification
(3): StopwordModification
(is_black_box): True
)
/.*/
--------------------------------------------- Result 1 ---------------------------------------------
Eine republikanische Strategie, um der Wiederwahl Obamas entgegenzuwirken-->[91m[FAILED][0m
A Republican strategy to counter the re-election of Obama
--------------------------------------------- Result 2 ---------------------------------------------
Die republikanischen Führer rechtfertigten ihre Politik durch die Not-->Repuzlican leaders justifZed their policy by the need to coq
[91mRepublican[0m leaders [91mjustified[0m their policy by the need to [91mcombat[0m electoral fraud.
[94mRepuzlican[0m leaders [94mjustifZed[0m their policy by the need to [94mcoqbat[0m electoral fraud.
+-------------------------------+--------+
| Attack Results | |
+-------------------------------+--------+
| Number of successful attacks: | 1 |
| Number of failed attacks: | 1 |
| Number of skipped attacks: | 0 |
| Original accuracy: | 100.0% |
| Accuracy under attack: | 50.0% |
| Attack success rate: | 50.0% |
| Average perturbed word %: | 25.0% |
| Average num. words per input: | 11.0 |
| Avg num queries: | 19.0 |
+-------------------------------+--------+
| {
"pile_set_name": "Github"
} |
function rebuild(obj)
%REBUILD Rebuild the internal data structures of a DagNN object
%   REBUILD(obj) rebuilds the internal data structures
%   of the DagNN obj. It is an helper function used internally
%   to update the network when layers are added or removed.

% Per-variable / per-parameter usage counters:
%   varFanIn(v)  - number of layers that *write* variable v (v appears
%                  among a layer's outputs).
%   varFanOut(v) - number of layers that *read* variable v (v appears
%                  among a layer's inputs).
%   parFanOut(p) - number of layers that use parameter p.
varFanIn = zeros(1, numel(obj.vars)) ;
varFanOut = zeros(1, numel(obj.vars)) ;
parFanOut = zeros(1, numel(obj.params)) ;

% Resolve the symbolic input/output/parameter names of each layer into
% numeric indexes, cache them on the layer, and accumulate the counters.
for l = 1:numel(obj.layers)
  ii = obj.getVarIndex(obj.layers(l).inputs) ;
  oi = obj.getVarIndex(obj.layers(l).outputs) ;
  pi = obj.getParamIndex(obj.layers(l).params) ;
  obj.layers(l).inputIndexes = ii ;
  obj.layers(l).outputIndexes = oi ;
  obj.layers(l).paramIndexes = pi ;
  varFanOut(ii) = varFanOut(ii) + 1 ;
  varFanIn(oi) = varFanIn(oi) + 1 ;
  parFanOut(pi) = parFanOut(pi) + 1 ;
end

% Copy the counters into the vars/params struct arrays. tolist() expands a
% cell array into a comma-separated list so one assignment fills every
% struct element at once.
[obj.vars.fanin] = tolist(num2cell(varFanIn)) ;
[obj.vars.fanout] = tolist(num2cell(varFanOut)) ;
if ~isempty(parFanOut),
  [obj.params.fanout] = tolist(num2cell(parFanOut)) ;
end

% dump unused variables
% keep marks variables referenced by at least one layer; cumsum(keep)
% gives the mapping from old variable indexes to the compacted ones.
keep = (varFanIn + varFanOut) > 0 ;
obj.vars = obj.vars(keep) ;
varRemap = cumsum(keep) ;

% dump unused parameters
keep = parFanOut > 0 ;
obj.params = obj.params(keep) ;
parRemap = cumsum(keep) ;

% update the indexes to account for removed layers, variables and parameters
for l = 1:numel(obj.layers)
  obj.layers(l).inputIndexes = varRemap(obj.layers(l).inputIndexes) ;
  obj.layers(l).outputIndexes = varRemap(obj.layers(l).outputIndexes) ;
  obj.layers(l).paramIndexes = parRemap(obj.layers(l).paramIndexes) ;
  obj.layers(l).block.layerIndex = l ;
end

% update the variable and parameter names hash maps
obj.varNames = cell2struct(num2cell(1:numel(obj.vars)), {obj.vars.name}, 2) ;
obj.paramNames = cell2struct(num2cell(1:numel(obj.params)), {obj.params.name}, 2) ;
obj.layerNames = cell2struct(num2cell(1:numel(obj.layers)), {obj.layers.name}, 2) ;

% determine the execution order again (and check for consistency)
obj.executionOrder = getOrder(obj) ;
% --------------------------------------------------------------------
function order = getOrder(obj)
% --------------------------------------------------------------------
% GETORDER  Compute a topological ordering of the network layers.
%   hops{v} lists the indexes of the layers that *read* variable v, so it
%   encodes the directed edges "producer layer -> consumer layer".
hops = cell(1, numel(obj.vars)) ;
for l = 1:numel(obj.layers)
  for v = obj.layers(l).inputIndexes
    hops{v}(end+1) = l ;
  end
end
% Depth-first visit every still-unvisited layer. dagSort() assigns each
% finished layer a positive "height" (1 + max height of its consumers);
% a remaining -1 flags participation in a cycle.
order = zeros(1, numel(obj.layers)) ;
for l = 1:numel(obj.layers)
  if order(l) == 0
    order = dagSort(obj, hops, order, l) ;
  end
end
if any(order == -1)
  warning('The network graph contains a cycle') ;
end
% Layers with larger heights feed the rest of the graph and must run
% first, hence the descending sort; sort()'s second output is the
% permutation, i.e. the execution order itself.
[~,order] = sort(order, 'descend') ;
% --------------------------------------------------------------------
function order = dagSort(obj, hops, order, layer)
% --------------------------------------------------------------------
% DAGSORT  Depth-first visit used by getOrder().
%   order(l) encodes the state of layer l: 0 = unvisited, -1 = currently
%   on the DFS path ("open"), positive = finished with value equal to
%   1 + the largest finished value among its consumers.
if order(layer) > 0, return ; end
order(layer) = -1 ; % mark as open
n = 0 ;
for o = obj.layers(layer).outputIndexes ;
  for child = hops{o}
    if order(child) == -1
      % The child is already on the current path: a cycle was found.
      % Bail out leaving this layer marked -1 so getOrder() can warn.
      return ;
    end
    if order(child) == 0
      order = dagSort(obj, hops, order, child) ;
    end
    n = max(n, order(child)) ;
  end
end
order(layer) = n + 1 ;
function varargout = tolist(x)
% TOLIST  Expand the cell array X into a comma-separated output list,
%   producing exactly numel(x) output arguments.
for k = 1:numel(x)
  varargout{k} = x{k} ; %#ok<AGROW>
end
| {
"pile_set_name": "Github"
} |
<div class="apiDetail">
<div>
<h2><span>String</span><span class="path">setting.data.key.</span>children</h2>
<h3>概述<span class="h3_info">[ 依赖 <span class="highlight_green">jquery.ztree.core</span> 核心 js ]</span></h3>
<div class="desc">
<p></p>
<div class="longdesc">
<p>zTree 节点数据中保存子节点数据的属性名称。</p>
<p>默认值:"children"</p>
</div>
</div>
<h3>setting 举例</h3>
<h4>1. 设置 zTree 显示节点时,将 treeNode 的 nodes 属性当做节点名称</h4>
<pre xmlns=""><code>var setting = {
data: {
key: {
children: "nodes"
}
}
};
......</code></pre>
</div>
</div> | {
"pile_set_name": "Github"
} |
OUTPUT_FORMAT("elf32-lm32", "elf32-lm32",
"elf32-lm32")
OUTPUT_ARCH(lm32)
ENTRY(start)
STARTUP(start.o)
/* Do we need any of these for elf?
__DYNAMIC = 0; */
/*
* Declare some sizes. Heap is sized at whatever ram space is left.
*/
RamBase = DEFINED(RamBase) ? RamBase : 0x08000000;
RamSize = DEFINED(RamSize) ? RamSize : 32M;
RamEnd = RamBase + RamSize;
HeapSize = DEFINED(HeapSize) ? HeapSize : 2M;
MEMORY {
ebr : ORIGIN = 0x04000000 , LENGTH = 32k
sdram : ORIGIN = 0x08000000 , LENGTH = 32M
}
SECTIONS
{
.boot :
{
KEEP (*(.boot))
} > ebr
/* Read-only sections, merged into text segment: */
.interp : { *(.interp) } > sdram
.hash : { *(.hash) } > sdram
.dynsym : { *(.dynsym) } > sdram
.dynstr : { *(.dynstr) } > sdram
.gnu.version : { *(.gnu.version) } > sdram
.gnu.version_d : { *(.gnu.version_d) } > sdram
.gnu.version_r : { *(.gnu.version_r) } > sdram
.rela.text :
{ *(.rela.text) *(.rela.gnu.linkonce.t*) } > sdram
.rela.data :
{ *(.rela.data) *(.rela.gnu.linkonce.d*) } > sdram
.rela.rodata :
{ *(.rela.rodata*) *(.rela.gnu.linkonce.r*) } > sdram
.rela.got : { *(.rela.got) } > sdram
.rela.got1 : { *(.rela.got1) } > sdram
.rela.got2 : { *(.rela.got2) } > sdram
.rela.ctors : { *(.rela.ctors) } > sdram
.rela.dtors : { *(.rela.dtors) } > sdram
.rela.init : { *(.rela.init) } > sdram
.rela.fini : { *(.rela.fini) } > sdram
.rela.bss : { *(.rela.bss) } > sdram
.rela.plt : { *(.rela.plt) } > sdram
.rela.sdata : { *(.rela.sdata) } > sdram
.rela.sbss : { *(.rela.sbss) } > sdram
.rela.sdata2 : { *(.rela.sdata2) } > sdram
.rela.sbss2 : { *(.rela.sbss2) } > sdram
.rela.dyn : { *(.rela.dyn) } > sdram
.init : { KEEP(*(.init)) } > sdram
.text :
{
*(.text*)
/*
* Special FreeBSD sysctl sections.
*/
. = ALIGN (16);
__start_set_sysctl_set = .;
*(set_sysctl_*);
__stop_set_sysctl_set = ABSOLUTE(.);
*(set_domain_*);
*(set_pseudo_*);
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
*(.gnu.linkonce.t*)
} > sdram
.fini : { _fini = .; KEEP(*(.fini)) } > sdram
.rodata : { *(.rodata*) KEEP (*(SORT(.rtemsroset.*))) *(.gnu.linkonce.r*) } > sdram
.rodata1 : { *(.rodata1) } > sdram
.tdata : {
_TLS_Data_begin = .;
*(.tdata .tdata.* .gnu.linkonce.td.*)
_TLS_Data_end = .;
} > sdram
.tbss : {
_TLS_BSS_begin = .;
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
_TLS_BSS_end = .;
} > sdram
_TLS_Data_size = _TLS_Data_end - _TLS_Data_begin;
_TLS_Data_begin = _TLS_Data_size != 0 ? _TLS_Data_begin : _TLS_BSS_begin;
_TLS_Data_end = _TLS_Data_size != 0 ? _TLS_Data_end : _TLS_BSS_begin;
_TLS_BSS_size = _TLS_BSS_end - _TLS_BSS_begin;
_TLS_Size = _TLS_BSS_end - _TLS_Data_begin;
_TLS_Alignment = MAX (ALIGNOF (.tdata), ALIGNOF (.tbss));
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(0x10000) + (. & (0x10000 - 1));
/* Ensure the __preinit_array_start label is properly aligned. We
could instead move the label definition inside the section, but
the linker would then create the section even if it turns out to
be empty, which isn't pretty. */
. = ALIGN(32 / 8);
PROVIDE (__preinit_array_start = .);
.preinit_array : { *(.preinit_array) } >sdram
PROVIDE (__preinit_array_end = .);
PROVIDE (__init_array_start = .);
.init_array : { *(.init_array) } >sdram
PROVIDE (__init_array_end = .);
PROVIDE (__fini_array_start = .);
.fini_array : { *(.fini_array) } >sdram
PROVIDE (__fini_array_end = .);
/* _SDA2_BASE_ = __SDATA2_START__ + 0x8000; */
.sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) } >sdram
.sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*)
/* avoid empty sdata2/sbss2 area -- __eabi would not set up r2
* which may be important if run-time loading is used
*/
. += 1;
} >sdram
.eh_frame : { *.(eh_frame) } >sdram
/* NOTE: if the BSP uses page tables, the correctness of
* '_etext' (and __DATA_START__) is CRUCIAL - otherwise,
* an invalid mapping may result!!!
*/
_etext = .;
PROVIDE (etext = .);
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. It would
be more correct to do this:
. = ALIGN(0x40000) + (ALIGN(8) & (0x40000 - 1));
The current expression does not correctly handle the case of a
text segment ending precisely at the end of a page; it causes the
data segment to skip a page. The above expression does not have
this problem, but it will currently (2/95) cause BFD to allocate
a single segment, combining both text and data, for this case.
This will prevent the text segment from being shared among
multiple executions of the program; I think that is more
important than losing a page of the virtual address space (note
that no actual memory is lost; the page which is skipped can not
be referenced). */
. = ALIGN(0x1000);
.data ALIGN(0x1000) :
{
/* NOTE: if the BSP uses page tables, the correctness of
* '__DATA_START__' (and _etext) is CRUCIAL - otherwise,
* an invalid mapping may result!!!
*/
PROVIDE(__DATA_START__ = ABSOLUTE(.) );
*(.data .data.* .gnu.linkonce.d*)
KEEP (*(SORT(.rtemsrwset.*)))
SORT(CONSTRUCTORS)
} > sdram
.data1 : { *(.data1) } > sdram
PROVIDE (__EXCEPT_START__ = .);
.gcc_except_table : {
*(.gcc_except_table)
*(.gcc_except_table.*)
} > sdram
PROVIDE (__EXCEPT_END__ = .);
.got1 : { *(.got1) } > sdram
/* Put .ctors and .dtors next to the .got2 section, so that the pointers
get relocated with -mrelocatable. Also put in the .fixup pointers.
The current compiler no longer needs this, but keep it around for 2.7.2 */
PROVIDE (_GOT2_START_ = .);
.got2 : { *(.got2) } > sdram
.dynamic : { *(.dynamic) } > sdram
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
/* We don't want to include the .ctor section from
from the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
} > sdram
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
} > sdram
PROVIDE (_FIXUP_START_ = .);
.fixup : { *(.fixup) } > sdram
PROVIDE (_FIXUP_END_ = .);
PROVIDE (_GOT2_END_ = .);
PROVIDE (_GOT_START_ = .);
.got : { *(.got) } > sdram
.got.plt : { *(.got.plt) } > sdram
PROVIDE (_GOT_END_ = .);
.jcr : { KEEP (*(.jcr)) } > sdram
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
/* _SDA_BASE_ = __SDATA_START__ + 0x8000; */
.sdata : { *(.sdata*) *(.gnu.linkonce.s.*) } >sdram
_edata = .;
_gp = .;
PROVIDE (edata = .);
.sbss :
{
_clear_start = .;
PROVIDE (__sbss_start = .);
*(.dynsbss)
*(.sbss* .gnu.linkonce.sb.*)
*(.scommon)
/* avoid empty sdata/sbss area -- __eabi would not set up r13
* which may be important if run-time loading is used
*/
. += 1;
. = ALIGN(16);
PROVIDE (__sbss_end = .);
} > sdram
.plt : { *(.plt) } > sdram
.bss :
{
PROVIDE (__bss_start = .);
*(.dynbss)
*(.bss .bss* .gnu.linkonce.b*)
*(COMMON)
. = ALIGN(16);
_end = . ;
__rtems_end = . ;
PROVIDE (end = .);
. = ALIGN (16);
_clear_end = .;
} > sdram
.rtemsstack (NOLOAD) : {
*(SORT(.rtemsstack.*))
WorkAreaBase = .;
} > sdram
/DISCARD/ :
{
*(.comment)
}
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* These must appear regardless of . */
}
| {
"pile_set_name": "Github"
} |
/* @flow */
import React from "react";
import { TouchableWithoutFeedback, StyleSheet } from "react-native";
import LText from "./LText";
import colors from "../colors";
import { scrollToTop } from "../navigation/utils";
export default function HeaderTitle({ style, ...newProps }: *) {
return (
<TouchableWithoutFeedback onPress={scrollToTop}>
<LText
{...newProps}
secondary
semiBold
style={[styleSheet.root, style]}
/>
</TouchableWithoutFeedback>
);
}
// Styles for the header title text: app dark-blue color, 16pt, centered.
const styleSheet = StyleSheet.create({
  root: {
    color: colors.darkBlue,
    fontSize: 16,
    alignItems: "center",
    justifyContent: "center",
  },
});
| {
"pile_set_name": "Github"
} |
package controllers.connection
import java.util.UUID
import controllers.BaseController
import models.connection.ConnectionSettings
import models.engine.DatabaseEngine
import models.forms.ConnectionForm
import services.connection.ConnectionSettingsService
import services.database.DatabaseRegistry
import util.{ApplicationContext, PasswordEncryptUtils, SlugUtils}
import scala.concurrent.Future
@javax.inject.Singleton
class ConnectionTestController @javax.inject.Inject() (override val ctx: ApplicationContext) extends BaseController {
  /** Validates the submitted connection form and attempts to open a database
   *  connection with the resulting settings.  Responds 200 with a plain-text
   *  "ok: ..." or "error: ..." body, or 400 when the form itself is invalid.
   */
  def test(connectionId: UUID) = withSession("connection.test") { implicit request =>
    val result = ConnectionForm.form.bindFromRequest.fold(
      formWithErrors => {
        // Form-level validation failed: echo the collected errors back.
        val errors = util.web.FormUtils.errorsToString(formWithErrors.errors)
        BadRequest(s"Invalid form: $errors")
      },
      cf => {
        // Build candidate settings from the form.  URL-override mode and
        // host/port/db mode are mutually exclusive, hence the isUrl switches.
        val almostUpdated = ConnectionSettings(
          id = UUID.randomUUID,
          name = cf.name,
          slug = SlugUtils.slugFor(cf.name),
          owner = request.identity.id,
          engine = DatabaseEngine.withName(cf.engine),
          host = if (cf.isUrl) { None } else { cf.host },
          port = if (cf.isUrl) { None } else { cf.port },
          dbName = if (cf.isUrl) { None } else { cf.dbName },
          extra = if (cf.isUrl) { None } else { cf.extra },
          urlOverride = if (cf.isUrl) { cf.urlOverride } else { None },
          username = cf.username
        )
        // A blank password means "keep the stored one" when re-testing an
        // existing connection; otherwise encrypt the submitted password.
        val updated = if (cf.password.trim.isEmpty) {
          val connOpt = ConnectionSettingsService.getById(connectionId)
          almostUpdated.copy(password = connOpt match {
            case Some(c) => c.password
            case None => PasswordEncryptUtils.encrypt("")
          })
        } else {
          almostUpdated.copy(password = PasswordEncryptUtils.encrypt(cf.password))
        }
        // Attempt a single connection; close it immediately on success so
        // a mere test leaks no resources.
        val result = DatabaseRegistry.connect(updated, 1)
        result match {
          case Right(x) =>
            x._1.close()
            Ok("ok: " + x._2)
          case Left(x) => Ok("error: " + x.getMessage)
        }
      }
    )
    Future.successful(result)
  }
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/python -u
#
# Copyright (c) 2013 by Dhiru Kholia, <dhiru (at) openwall.com>
#
# Python Bindings for LZMA
#
# Copyright (c) 2004-2010 by Joachim Bauch, [email protected]
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""Read from and write to 7zip format archives.
"""
from binascii import unhexlify
from datetime import datetime
try:
import pylzma
# To install pylzma on Ubuntu:
# apt-get install python-pip python-dev
# pip install pylzma # may do as non-root user in group staff
except ImportError:
pass
from struct import pack, unpack
from zlib import crc32
import zlib
import bz2
import binascii
import StringIO
import sys
import os
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
from functools import reduce
except ImportError:
# reduce is available in functools starting with Python 2.6
pass
try:
from pytz import UTC
except ImportError:
# pytz is optional, define own "UTC" timestamp
# reference implementation from Python documentation
from datetime import timedelta, tzinfo
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
try:
unicode
except NameError:
# Python 3.x
def unicode(s, encoding):
return s
else:
def bytes(s, encoding):
return s
READ_BLOCKSIZE = 16384
MAGIC_7Z = unhexlify('377abcaf271c') # '7z\xbc\xaf\x27\x1c'
PROPERTY_END = unhexlify('00') # '\x00'
PROPERTY_HEADER = unhexlify('01') # '\x01'
PROPERTY_ARCHIVE_PROPERTIES = unhexlify('02') # '\x02'
PROPERTY_ADDITIONAL_STREAMS_INFO = unhexlify('03') # '\x03'
PROPERTY_MAIN_STREAMS_INFO = unhexlify('04') # '\x04'
PROPERTY_FILES_INFO = unhexlify('05') # '\x05'
PROPERTY_PACK_INFO = unhexlify('06') # '\x06'
PROPERTY_UNPACK_INFO = unhexlify('07') # '\x07'
PROPERTY_SUBSTREAMS_INFO = unhexlify('08') # '\x08'
PROPERTY_SIZE = unhexlify('09') # '\x09'
PROPERTY_CRC = unhexlify('0a') # '\x0a'
PROPERTY_FOLDER = unhexlify('0b') # '\x0b'
PROPERTY_CODERS_UNPACK_SIZE = unhexlify('0c') # '\x0c'
PROPERTY_NUM_UNPACK_STREAM = unhexlify('0d') # '\x0d'
PROPERTY_EMPTY_STREAM = unhexlify('0e') # '\x0e'
PROPERTY_EMPTY_FILE = unhexlify('0f') # '\x0f'
PROPERTY_ANTI = unhexlify('10') # '\x10'
PROPERTY_NAME = unhexlify('11') # '\x11'
PROPERTY_CREATION_TIME = unhexlify('12') # '\x12'
PROPERTY_LAST_ACCESS_TIME = unhexlify('13') # '\x13'
PROPERTY_LAST_WRITE_TIME = unhexlify('14') # '\x14'
PROPERTY_ATTRIBUTES = unhexlify('15') # '\x15'
PROPERTY_COMMENT = unhexlify('16') # '\x16'
PROPERTY_ENCODED_HEADER = unhexlify('17') # '\x17'
COMPRESSION_METHOD_COPY = unhexlify('00') # '\x00'
COMPRESSION_METHOD_LZMA = unhexlify('03') # '\x03'
COMPRESSION_METHOD_CRYPTO = unhexlify('06') # '\x06'
COMPRESSION_METHOD_MISC = unhexlify('04') # '\x04'
COMPRESSION_METHOD_MISC_ZIP = unhexlify('0401') # '\x04\x01'
COMPRESSION_METHOD_MISC_BZIP = unhexlify('0402') # '\x04\x02'
COMPRESSION_METHOD_7Z_AES256_SHA256 = unhexlify('06f10701') # '\x06\xf1\x07\x01'
# Offset, in seconds, between the FILETIME epoch (1601-01-01 UTC) and the
# Unix epoch (1970-01-01 UTC); negative because FILETIME starts earlier.
TIMESTAMP_ADJUST = -11644473600

def toTimestamp(filetime):
    """Convert a Windows/7z FILETIME value to a Unix timestamp (float).

    FILETIME counts 100-nanosecond ticks since 1601-01-01 (UTC); divide
    down to seconds, then shift epochs.
    """
    seconds_since_1601 = filetime / 10000000.0
    return seconds_since_1601 + TIMESTAMP_ADJUST
class ArchiveError(Exception):
    """Base class for all errors raised while reading a 7z archive."""
    pass

class FormatError(ArchiveError):
    """The data does not follow the expected 7z archive structure."""
    pass

class EncryptedArchiveError(ArchiveError):
    """The archive (or its header) is encrypted and cannot be parsed."""
    pass

class UnsupportedCompressionMethodError(ArchiveError):
    """A stream uses a codec this reader does not implement."""
    pass

class DecryptionError(ArchiveError):
    """Decrypting an encrypted stream failed."""
    pass

class NoPasswordGivenError(DecryptionError):
    """An encrypted stream was found but no password was supplied."""
    pass

class WrongPasswordError(DecryptionError):
    """The supplied password does not decrypt the stream."""
    pass
class ArchiveTimestamp(long):
    """Windows FILETIME timestamp."""
    # NOTE: subclasses the Python 2 `long` type; under Python 3 this would
    # need to derive from `int` instead.

    def __repr__(self):
        # e.g. ArchiveTimestamp(129442692000000000)
        return '%s(%d)' % (type(self).__name__, self)

    def as_datetime(self):
        """Convert FILETIME to Python datetime object."""
        # Timezone-aware (UTC); relies on the module-level toTimestamp()
        # helper and UTC tzinfo defined earlier in this file.
        return datetime.fromtimestamp(toTimestamp(self), UTC)
class Base(object):
    """Mixin with the primitive read helpers shared by all header parsers.

    Every method consumes bytes from a binary file-like object positioned
    at the field to decode.
    """

    def _readReal64Bit(self, file):
        """Read a fixed-width little-endian 64-bit integer.

        Returns a tuple ``(value, raw)`` where ``raw`` is the 8 bytes read
        (kept so callers can also feed them into checksums).
        """
        res = file.read(8)
        a, b = unpack('<LL', res)
        return b << 32 | a, res

    def _read64Bit(self, file):
        """Decode the 7z variable-length integer ("REAL_UINT64") encoding.

        The number of leading 1-bits in the first byte gives the count of
        extra little-endian bytes that follow; the remaining low bits of
        the first byte supply the most significant part of the value.
        """
        b = ord(file.read(1))
        mask = 0x80
        for i in range(8):
            if b & mask == 0:
                # i extra bytes follow, least significant first; reverse so
                # the reduce() folds them most-significant-byte first.
                bytes = list(unpack('%dB' % i, file.read(i)))
                bytes.reverse()
                value = (bytes and reduce(lambda x, y: x << 8 | y, bytes)) or 0
                highpart = b & (mask - 1)
                return value + (highpart << (i * 8))
            mask >>= 1

    def _readBoolean(self, file, count, checkall=0):
        """Read ``count`` booleans packed MSB-first, eight per byte.

        When ``checkall`` is true the vector is preceded by an
        "all defined" byte: any non-zero value means every flag is set
        and no bit vector follows.
        """
        if checkall:
            alldefined = file.read(1)
            if alldefined != unhexlify('00'):
                return [True] * count
        result = []
        b = 0
        mask = 0
        for i in range(count):
            if mask == 0:
                b = ord(file.read(1))
                mask = 0x80
            result.append(b & mask != 0)
            mask >>= 1
        return result

    def checkcrc(self, crc, data):
        """Return True when ``data`` has CRC32 ``crc``.

        crc32() may return a signed value (Python 2), so mask to the
        unsigned 32-bit range before comparing.
        """
        check = crc32(data) & 0xffffffff
        return crc == check


class PackInfo(Base):
    """Information about the packed (compressed) streams of the archive."""

    def __init__(self, file):
        # Offset of the packed streams (relative to the end of the
        # signature header) and how many of them there are.
        self.packpos = self._read64Bit(file)
        self.numstreams = self._read64Bit(file)
        id = file.read(1)
        if id == PROPERTY_SIZE:
            self.packsizes = [self._read64Bit(file) for x in range(self.numstreams)]
            id = file.read(1)
        if id == PROPERTY_CRC:
            # NOTE(review): these digests are parsed with the variable-length
            # integer reader; the 7z format stores stream CRCs as fixed
            # 32-bit values (cf. Digests) -- verify against a real archive.
            self.crcs = [self._read64Bit(file) for x in range(self.numstreams)]
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))


class Folder(Base):
    """A "folder": one chain of coders producing a stream of unpacked data.

    The ``unpacksizes`` attribute is not set here; the surrounding
    UnpackInfo parser assigns it after reading the coders' unpack sizes.
    """

    def __init__(self, file):
        numcoders = self._read64Bit(file)
        self.numcoders = numcoders
        self.coders = []
        self.digestdefined = False
        totalin = 0
        self.totalout = 0
        # Each coder record starts with a flags byte: low nibble = length
        # of the method id, 0x10 = explicit stream counts follow, 0x20 = a
        # properties blob follows, 0x80 = more alternative methods follow.
        for i in range(numcoders):
            while True:
                b = ord(file.read(1))
                methodsize = b & 0xf
                issimple = b & 0x10 == 0
                noattributes = b & 0x20 == 0
                last_alternative = b & 0x80 == 0
                c = {}
                c['method'] = file.read(methodsize)
                if not issimple:
                    c['numinstreams'] = self._read64Bit(file)
                    c['numoutstreams'] = self._read64Bit(file)
                else:
                    c['numinstreams'] = 1
                    c['numoutstreams'] = 1
                totalin += c['numinstreams']
                self.totalout += c['numoutstreams']
                if not noattributes:
                    c['properties'] = file.read(self._read64Bit(file))
                self.coders.append(c)
                if last_alternative:
                    break
        # Bind pairs connect one coder's output to another coder's input:
        # each pair is (input stream index, output stream index).
        numbindpairs = self.totalout - 1
        self.bindpairs = []
        for i in range(numbindpairs):
            self.bindpairs.append((self._read64Bit(file), self._read64Bit(file), ))
        # Input streams not bound to any output are fed from packed streams.
        numpackedstreams = totalin - numbindpairs
        self.numpackedstreams = numpackedstreams
        self.packed_indexes = []
        if numpackedstreams == 1:
            # A single packed stream: it is the unique unbound input.
            for i in range(totalin):
                if self.findInBindPair(i) < 0:
                    self.packed_indexes.append(i)
        elif numpackedstreams > 1:
            # Several packed streams: their indexes are stored explicitly.
            for i in range(numpackedstreams):
                self.packed_indexes.append(self._read64Bit(file))

    def getUnpackSize(self):
        """Size of the folder's final output.

        That is the unpack size of the last output stream that is NOT
        bound to another coder's input.
        """
        if not self.unpacksizes:
            return 0
        r = list(range(len(self.unpacksizes)))
        r.reverse()
        for i in r:
            # BUGFIX: the original tested the truthiness of the returned
            # index ("if self.findOutBindPair(i):"), which treats bind-pair
            # index 0 as "unbound" and -1 as "bound".  Only a negative
            # result actually means the output stream is unbound.
            if self.findOutBindPair(i) < 0:
                return self.unpacksizes[i]
        raise TypeError('not found')

    def findInBindPair(self, index):
        """Index of the bind pair whose *input* stream is ``index``, or -1."""
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if a == index:
                return idx
        return -1

    def findOutBindPair(self, index):
        """Index of the bind pair whose *output* stream is ``index``, or -1."""
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if b == index:
                return idx
        return -1
class Digests(Base):
    """ holds a list of checksums """
    def __init__(self, file, count):
        # Bit vector telling which of the `count` CRC slots are valid.
        self.defined = self._readBoolean(file, count, checkall=1)
        # NOTE(review): `count` CRCs are read unconditionally, i.e. a
        # 4-byte slot is consumed even for undefined entries -- confirm
        # against the reference reader for archives with partial CRCs.
        self.crcs = [unpack('<L', file.read(4))[0] for x in range(count)]
UnpackDigests = Digests  # unpack digests share the same on-disk layout
class UnpackInfo(Base):
    """ combines multiple folders """
    def __init__(self, file):
        id = file.read(1)
        if id != PROPERTY_FOLDER:
            raise FormatError('folder id expected but %s found' % repr(id))
        self.numfolders = self._read64Bit(file)
        self.folders = []
        # "external" selects whether the folder descriptions are inline
        # (0x00) or stored in a separate data stream (0x01).
        external = file.read(1)
        if external == unhexlify('00'):
            self.folders = [Folder(file) for x in range(self.numfolders)]
        elif external == unhexlify('01'):
            # only the index of the stream holding the folders is given
            self.datastreamidx = self._read64Bit(file)
        else:
            raise FormatError('0x00 or 0x01 expected but %s found' % repr(external))
        id = file.read(1)
        if id != PROPERTY_CODERS_UNPACK_SIZE:
            raise FormatError('coders unpack size id expected but %s found' % repr(id))
        # one unpacked size per output stream of every folder
        for folder in self.folders:
            folder.unpacksizes = [self._read64Bit(file) for x in range(folder.totalout)]
        id = file.read(1)
        if id == PROPERTY_CRC:
            # optional per-folder CRCs of the unpacked data
            digests = UnpackDigests(file, self.numfolders)
            for idx in range(self.numfolders):
                folder = self.folders[idx]
                folder.digestdefined = digests.defined[idx]
                folder.crc = digests.crcs[idx]
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class SubstreamsInfo(Base):
    """ defines the substreams of a folder """
    def __init__(self, file, numfolders, folders):
        # Reads per-substream unpack sizes and CRCs for `numfolders`
        # folders (parsed earlier by UnpackInfo).
        self.digests = []
        self.digestsdefined = []
        id = file.read(1)
        if id == PROPERTY_NUM_UNPACK_STREAM:
            # explicit substream count per folder
            self.numunpackstreams = [self._read64Bit(file) for x in range(numfolders)]
            id = file.read(1)
        else:
            # default: exactly one substream per folder
            self.numunpackstreams = []
            for idx in range(numfolders):
                self.numunpackstreams.append(1)
        if id == PROPERTY_SIZE:
            # Sizes of all but the last substream of a folder are
            # stored; the last is the folder total minus the others.
            # NOTE(review): `sum` is never reset between folders --
            # verify against the reference implementation for archives
            # with more than one folder.
            sum = 0
            self.unpacksizes = []
            for i in range(len(self.numunpackstreams)):
                for j in range(1, self.numunpackstreams[i]):
                    size = self._read64Bit(file)
                    self.unpacksizes.append(size)
                    sum += size
                self.unpacksizes.append(folders[i].getUnpackSize() - sum)
            id = file.read(1)
        # Count substreams that still need a CRC: folders with one
        # substream and a folder-level CRC reuse that CRC.
        numdigests = 0
        numdigeststotal = 0
        for i in range(numfolders):
            numsubstreams = self.numunpackstreams[i]
            if numsubstreams != 1 or not folders[i].digestdefined:
                numdigests += numsubstreams
            numdigeststotal += numsubstreams
        if id == PROPERTY_CRC:
            digests = Digests(file, numdigests)
            didx = 0
            for i in range(numfolders):
                folder = folders[i]
                numsubstreams = self.numunpackstreams[i]
                if numsubstreams == 1 and folder.digestdefined:
                    # reuse the folder-level CRC
                    self.digestsdefined.append(True)
                    self.digests.append(folder.crc)
                else:
                    for j in range(numsubstreams):
                        self.digestsdefined.append(digests.defined[didx])
                        self.digests.append(digests.crcs[didx])
                        didx += 1
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %r found' % id)
        if not self.digestsdefined:
            # no CRC property at all: mark every substream undefined
            self.digestsdefined = [False] * numdigeststotal
            self.digests = [0] * numdigeststotal
class StreamsInfo(Base):
    """Container tying together pack, unpack and substream sections."""

    def __init__(self, file):
        # Each optional section is announced by a one-byte property id;
        # the sequence is terminated by PROPERTY_END.
        prop = file.read(1)
        if prop == PROPERTY_PACK_INFO:
            self.packinfo = PackInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_UNPACK_INFO:
            self.unpackinfo = UnpackInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_SUBSTREAMS_INFO:
            # substreams need the folder list parsed just above
            self.substreamsinfo = SubstreamsInfo(
                file, self.unpackinfo.numfolders, self.unpackinfo.folders)
            prop = file.read(1)
        if prop != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(prop))
class FilesInfo(Base):
    """ holds file properties """
    def _readTimes(self, file, files, name):
        # Read an optional FILETIME per file and store it (or None)
        # under key `name` in each file's property dict.
        defined = self._readBoolean(file, len(files), checkall=1)
        # NOTE: the "external" flag is currently ignored, should be 0x00
        external = file.read(1)
        for i in range(len(files)):
            if defined[i]:
                files[i][name] = ArchiveTimestamp(self._readReal64Bit(file)[0])
            else:
                files[i][name] = None
    def __init__(self, file):
        self.numfiles = self._read64Bit(file)
        self.files = [{'emptystream': False} for x in range(self.numfiles)]
        numemptystreams = 0
        # property records: (type, size, payload) until PROPERTY_END
        while True:
            typ = self._read64Bit(file)
            if typ > 255:
                raise FormatError('invalid type, must be below 256, is %d' % typ)
            # re-pack as a single byte so it compares against the
            # PROPERTY_* byte constants
            typ = pack('B', typ)
            if typ == PROPERTY_END:
                break
            size = self._read64Bit(file)
            buffer = BytesIO(file.read(size))
            if typ == PROPERTY_EMPTY_STREAM:
                isempty = self._readBoolean(buffer, self.numfiles)
                list(map(lambda x, y: x.update({'emptystream': y}), self.files, isempty))
                for x in isempty:
                    if x: numemptystreams += 1
                # defaults in case the optional records never appear
                emptyfiles = [False] * numemptystreams
                antifiles = [False] * numemptystreams
            elif typ == PROPERTY_EMPTY_FILE:
                emptyfiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_ANTI:
                antifiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_NAME:
                external = buffer.read(1)
                if external != unhexlify('00'):
                    self.dataindex = self._read64Bit(buffer)
                    # XXX: evaluate external
                    raise NotImplementedError
                # names are NUL-terminated UTF-16-LE strings, decoded
                # two bytes (one code unit) at a time
                for f in self.files:
                    name = ''
                    while True:
                        ch = buffer.read(2)
                        if ch == unhexlify('0000'):
                            f['filename'] = name
                            break
                        name += ch.decode('utf-16')
            elif typ == PROPERTY_CREATION_TIME:
                self._readTimes(buffer, self.files, 'creationtime')
            elif typ == PROPERTY_LAST_ACCESS_TIME:
                self._readTimes(buffer, self.files, 'lastaccesstime')
            elif typ == PROPERTY_LAST_WRITE_TIME:
                self._readTimes(buffer, self.files, 'lastwritetime')
            elif typ == PROPERTY_ATTRIBUTES:
                defined = self._readBoolean(buffer, self.numfiles, checkall=1)
                for i in range(self.numfiles):
                    f = self.files[i]
                    if defined[i]:
                        f['attributes'] = unpack('<L', buffer.read(4))[0]
                    else:
                        f['attributes'] = None
            else:
                raise FormatError('invalid type %r' % (typ))
class Header(Base):
    """Top-level archive header made of optional, ordered sections."""

    def __init__(self, file):
        # Every section is introduced by its one-byte property id and
        # the whole header ends with PROPERTY_END.
        section = file.read(1)
        if section == PROPERTY_ARCHIVE_PROPERTIES:
            self.properties = ArchiveProperties(file)
            section = file.read(1)
        if section == PROPERTY_ADDITIONAL_STREAMS_INFO:
            self.additional_streams = StreamsInfo(file)
            section = file.read(1)
        if section == PROPERTY_MAIN_STREAMS_INFO:
            self.main_streams = StreamsInfo(file)
            section = file.read(1)
        if section == PROPERTY_FILES_INFO:
            self.files = FilesInfo(file)
            section = file.read(1)
        if section != PROPERTY_END:
            raise FormatError('end id expected but %s found' % (repr(section)))
class ArchiveFile(Base):
    """ wrapper around a file in the archive """
    def __init__(self, info, start, src_start, size, folder, archive, maxsize=None):
        """Bind per-file metadata `info` to its folder and archive.

        `start` is the offset inside the unpacked folder data,
        `src_start` the offset of the packed data inside the archive
        file and `size` the unpacked size of this member.
        """
        self.digest = None
        self._archive = archive
        self._file = archive._file
        self._start = start
        self._src_start = src_start
        self._folder = folder
        self.size = size
        # maxsize is only valid for solid archives
        self._maxsize = maxsize
        # expose metadata (filename, attributes, ...) as attributes
        for k, v in info.items():
            setattr(self, k, v)
        self.reset()
        # Maps a coder method-id prefix to the decoder method name.
        # BUG FIX: the AES entry pointed at '_read_7z_aes256_sha256'
        # while the method below was defined without the leading
        # underscore, so dispatching to it raised AttributeError; the
        # method has been renamed to match this table.
        self._decoders = {
            COMPRESSION_METHOD_COPY: '_read_copy',
            COMPRESSION_METHOD_LZMA: '_read_lzma',
            COMPRESSION_METHOD_MISC_ZIP: '_read_zip',
            COMPRESSION_METHOD_MISC_BZIP: '_read_bzip',
            COMPRESSION_METHOD_7Z_AES256_SHA256: '_read_7z_aes256_sha256',
        }
    def _is_encrypted(self):
        """True if any coder in this file's folder chain is 7z AES-256."""
        return COMPRESSION_METHOD_7Z_AES256_SHA256 in [x['method'] for x in self._folder.coders]
    def reset(self):
        """Rewind the logical read position."""
        self.pos = 0
    def read(self):
        """Run the folder's coder chain in order and return the data."""
        if not self._folder.coders:
            raise TypeError("file has no coder informations")
        data = None
        for coder in self._folder.coders:
            method = coder['method']
            decoder = None
            # method ids are matched by longest known prefix
            while method and decoder is None:
                decoder = self._decoders.get(method, None)
                method = method[:-1]
            if decoder is None:
                raise UnsupportedCompressionMethodError(repr(coder['method']))
            data = getattr(self, decoder)(coder, data)
        return data
    def _read_copy(self, coder, input):
        """COPY coder: the data is stored verbatim."""
        if not input:
            self._file.seek(self._src_start)
            input = self._file.read(self.uncompressed)
        return input[self._start:self._start+self.size]
    def _read_from_decompressor(self, coder, decompressor, input, checkremaining=False, with_cache=False):
        """Common driver that feeds packed bytes into `decompressor`.

        When the compressed size is unknown (solid archives) the input
        is streamed in READ_BLOCKSIZE chunks; for solid archives the
        partially-decompressed state is cached on the folder so the
        next member does not re-decompress from the start.
        """
        data = ''
        idx = 0
        cnt = 0
        properties = coder.get('properties', None)
        if properties:
            # prime the decompressor with the coder's property blob
            decompressor.decompress(properties)
        total = self.compressed
        if not input and total is None:
            remaining = self._start+self.size
            out = BytesIO()
            cache = getattr(self._folder, '_decompress_cache', None)
            if cache is not None:
                # resume from previously decompressed solid data
                data, pos, decompressor = cache
                out.write(data)
                remaining -= len(data)
                self._file.seek(pos)
            else:
                self._file.seek(self._src_start)
            checkremaining = checkremaining and not self._folder.solid
            while remaining > 0:
                data = self._file.read(READ_BLOCKSIZE)
                if checkremaining or (with_cache and len(data) < READ_BLOCKSIZE):
                    tmp = decompressor.decompress(data, remaining)
                else:
                    tmp = decompressor.decompress(data)
                assert len(tmp) > 0
                out.write(tmp)
                remaining -= len(tmp)
            data = out.getvalue()
            if with_cache and self._folder.solid:
                # don't decompress start of solid archive for next file
                # TODO: limit size of cached data
                self._folder._decompress_cache = (data, self._file.tell(), decompressor)
        else:
            if not input:
                self._file.seek(self._src_start)
                input = self._file.read(total)
            if checkremaining:
                data = decompressor.decompress(input, self._start+self.size)
            else:
                data = decompressor.decompress(input)
        return data[self._start:self._start+self.size]
    def _read_lzma(self, coder, input):
        """LZMA coder (via pylzma)."""
        dec = pylzma.decompressobj(maxlength=self._start+self.size)
        try:
            return self._read_from_decompressor(coder, dec, input, checkremaining=True, with_cache=True)
        except ValueError:
            # garbage after decryption usually means a bad password
            if self._is_encrypted():
                raise WrongPasswordError('invalid password')
            raise
    def _read_zip(self, coder, input):
        """Deflate coder (raw stream, hence the -15 window bits)."""
        dec = zlib.decompressobj(-15)
        return self._read_from_decompressor(coder, dec, input, checkremaining=True)
    def _read_bzip(self, coder, input):
        """BZip2 coder."""
        dec = bz2.BZ2Decompressor()
        return self._read_from_decompressor(coder, dec, input)
    def _read_7z_aes256_sha256(self, coder, input):
        """7z AES-256 coder: derive key/IV from the coder properties
        and the archive password, then decrypt the packed data.

        Renamed from read_7z_aes256_sha256 so the _decoders dispatch
        table can actually find it.
        """
        if not self._archive.password:
            raise NoPasswordGivenError()
        # TODO: this needs some sanity checks
        firstbyte = ord(coder['properties'][0])
        numcyclespower = firstbyte & 0x3f
        if firstbyte & 0xc0 != 0:
            saltsize = (firstbyte >> 7) & 1
            ivsize = (firstbyte >> 6) & 1
            secondbyte = ord(coder['properties'][1])
            saltsize += (secondbyte >> 4)
            ivsize += (secondbyte & 0x0f)
            assert len(coder['properties']) == 2+saltsize+ivsize
            salt = coder['properties'][2:2+saltsize]
            iv = coder['properties'][2+saltsize:2+saltsize+ivsize]
            assert len(salt) == saltsize
            assert len(iv) == ivsize
            assert numcyclespower <= 24
            if ivsize < 16:
                # AES needs a full 16-byte IV; zero-pad the tail
                iv += '\x00'*(16-ivsize)
        else:
            salt = iv = ''
        password = self._archive.password.encode('utf-16-le')
        key = pylzma.calculate_key(password, numcyclespower, salt=salt)
        cipher = pylzma.AESDecrypt(key, iv=iv)
        if not input:
            self._file.seek(self._src_start)
            uncompressed_size = self.uncompressed
            if uncompressed_size & 0x0f:
                # we need a multiple of 16 bytes
                uncompressed_size += 16 - (uncompressed_size & 0x0f)
            input = self._file.read(uncompressed_size)
        result = cipher.decrypt(input)
        return result
    def checkcrc(self):
        """Re-read the member and verify it against its stored CRC."""
        if self.digest is None:
            # no digest recorded: treat as valid
            return True
        self.reset()
        data = self.read()
        return super(ArchiveFile, self).checkcrc(self.digest, data)
# XXX global state shared with Archive7z below -- results of the last
# SetDecoderProperties2() call.
iv = None
ivSize = None
Salt = None
NumCyclesPower = None
SaltSize = None
def SetDecoderProperties2(data):
    """Parse 7z AES coder properties into the module-level globals.

    Mirrors CDecoder::SetDecoderProperties2 from 7-Zip's 7zAES.cpp.
    Returns "S_OK" when no salt/IV is present, "OK" after salt and IV
    were extracted, and None when the cycles-power value is invalid.
    """
    global iv, ivSize, Salt, NumCyclesPower, SaltSize
    buf = bytearray(data)
    first = buf[0]
    NumCyclesPower = first & 0x3F
    if NumCyclesPower > 24:
        # invalid KDF iteration count
        return None
    if (first & 0xC0) == 0:
        # neither salt nor IV present
        return "S_OK"
    # high bits of the first byte plus the second byte's nibbles give
    # the salt and IV lengths
    SaltSize = (first >> 7) & 1
    ivSize = (first >> 6) & 1
    second = buf[1]
    SaltSize += second >> 4
    ivSize += second & 0x0F
    offset = 2
    Salt = str(buf[offset:offset + SaltSize])
    offset += SaltSize
    iv = str(buf[offset:offset + ivSize])
    if len(iv) < 16:
        # zero-pad the IV to a full AES block
        iv = iv + "\x00" * (16 - len(iv))
    return "OK"
class Archive7z(Base):
    """ the archive itself """
    # NOTE(review): this class has been repurposed from py7zlib into a
    # hash extractor: for archives with an encrypted header it prints a
    # "$7z$..." crackable hash line to stdout and returns early instead
    # of decoding the archive.  Python 2 only (print statements).
    def __init__(self, file, password=None):
        self._file = file
        self.password = password
        # signature check: the file must start with the 7z magic bytes
        self.header = file.read(len(MAGIC_7Z))
        if self.header != MAGIC_7Z:
            raise FormatError('not a 7z file')
        self.version = unpack('BB', file.read(2))
        # start header: CRC plus offset/size/CRC of the "next header"
        self.startheadercrc = unpack('<L', file.read(4))[0]
        self.nextheaderofs, data = self._readReal64Bit(file)
        crc = crc32(data)
        self.nextheadersize, data = self._readReal64Bit(file)
        crc = crc32(data, crc)
        data = file.read(4)
        self.nextheadercrc = unpack('<L', data)[0]
        crc = crc32(data, crc) & 0xffffffff
        if crc != self.startheadercrc:
            raise FormatError('invalid header data')
        self.afterheader = file.tell()
        file.seek(self.nextheaderofs, 1)
        buffer = BytesIO(file.read(self.nextheadersize))
        if not self.checkcrc(self.nextheadercrc, buffer.getvalue()):
            raise FormatError('invalid header data')
        while True:
            id = buffer.read(1)
            if not id or id == PROPERTY_HEADER:
                break
            if id != PROPERTY_ENCODED_HEADER:
                raise TypeError('Unknown field: %r' % (id))
            # ReadAndDecodePackedStreams (7zIn.cpp)
            streams = StreamsInfo(buffer)
            file.seek(self.afterheader + 0)
            data = bytes('', 'ascii')
            for folder in streams.unpackinfo.folders:
                file.seek(streams.packinfo.packpos, 1)
                props = folder.coders[0]['properties']
                # decode properties
                if SetDecoderProperties2(props):
                    # derive keys
                    # password = "password".encode('utf-16-le')
                    # print NumCyclesPower, Salt, password
                    # key = pylzma.calculate_key(password, NumCyclesPower, salt=Salt)
                    # cipher = pylzma.AESDecrypt(key, iv=str(iv))
                    global Salt
                    if len(Salt) == 0:
                        Salt = "\x11\x22" # fake salt
                    for idx in range(len(streams.packinfo.packsizes)):
                        tmp = file.read(streams.packinfo.packsizes[idx])
                        fname = os.path.basename(self._file.name)
                        # emit the crackable hash line for this stream
                        print "%s:$7z$0$%s$%s$%s$%s$%s$%s$%s$%s$%s" % (fname,
                            NumCyclesPower, SaltSize, binascii.hexlify(Salt),
                            ivSize, binascii.hexlify(iv), folder.crc, len(tmp),
                            folder.unpacksizes[idx], binascii.hexlify(tmp))
                        # print binascii.hexlify(tmp)
                        # result = cipher.decrypt(tmp)
                        # print folder.unpacksizes
                        # print folder.coders
                        # XXX we don't know how to handle unpacksizes of size > 1
                        # XXX we need to locate correct data and pass it to correct decompressor
                        # XXX correct decompressor can be located from folder.coders
                        # data = result # for checksum check
                        size = folder.unpacksizes[idx] # for checksum check
                        if len(folder.unpacksizes) > 1:
                            sys.stderr.write("%s : multiple unpacksizes found, not supported fully yet!\n" % fname)
                    # print binascii.hexlify(result)
                    # flds = Folder(BytesIO(result))
                    # print flds.coders
                    # print flds.packed_indexes, flds.totalout
                    # XXX return can't be right
                    return
                # else:
                #     for idx in range(len(streams.packinfo.packsizes)):
                #         tmp = file.read(streams.packinfo.packsizes[idx])
                #         data += pylzma.decompress(props+tmp, maxlength=folder.unpacksizes[idx])
                #
                #     if folder.digestdefined:
                #         if not self.checkcrc(folder.crc, data[0:size]):
                #             raise FormatError('invalid block data')
                #     # XXX return can't be right
                #     return
            # XXX this part is not done yet
            sys.stderr.write("%s : 7-Zip files without header encryption are *not* supported yet!\n" % (file.name))
            return
        # plain (unencrypted) header path below
        buffer = BytesIO(file.read())
        id = buffer.read(1)
        self.files = []
        if not id:
            # empty archive
            self.solid = False
            self.numfiles = 0
            self.filenames = []
            return
        xx = FilesInfo(buffer)
        self.header = Header(buffer)
        files = self.header.files
        folders = self.header.main_streams.unpackinfo.folders
        packinfo = self.header.main_streams.packinfo
        subinfo = self.header.main_streams.substreamsinfo
        packsizes = packinfo.packsizes
        self.solid = packinfo.numstreams == 1
        if hasattr(subinfo, 'unpacksizes'):
            unpacksizes = subinfo.unpacksizes
        else:
            unpacksizes = [x.unpacksizes[0] for x in folders]
        # walk all files, assigning each non-empty one its folder and
        # its offsets within packed/unpacked data
        fidx = 0
        obidx = 0
        src_pos = self.afterheader
        pos = 0
        folder_start = 0
        folder_pos = src_pos
        maxsize = (self.solid and packinfo.packsizes[0]) or None
        for idx in range(files.numfiles):
            info = files.files[idx]
            if info['emptystream']:
                continue
            folder = folders[fidx]
            folder.solid = subinfo.numunpackstreams[fidx] > 1
            maxsize = (folder.solid and packinfo.packsizes[fidx]) or None
            if folder.solid:
                # file is part of solid archive
                info['compressed'] = None
            elif obidx < len(packsizes):
                # file is compressed
                info['compressed'] = packsizes[obidx]
            else:
                # file is not compressed
                info['compressed'] = unpacksizes[obidx]
            info['uncompressed'] = unpacksizes[obidx]
            file = ArchiveFile(info, pos, src_pos, unpacksizes[obidx], folder, self, maxsize=maxsize)
            if subinfo.digestsdefined[obidx]:
                file.digest = subinfo.digests[obidx]
            self.files.append(file)
            if folder.solid:
                pos += unpacksizes[obidx]
            else:
                src_pos += info['compressed']
            obidx += 1
            if idx >= subinfo.numunpackstreams[fidx]+folder_start:
                # advance to the next folder
                folder_pos += packinfo.packsizes[fidx]
                src_pos = folder_pos
                folder_start = idx
                fidx += 1
        self.numfiles = len(self.files)
        self.filenames = map(lambda x: x.filename, self.files)
    # interface like TarFile
    def getmember(self, name):
        """Return the ArchiveFile with the given name, or None."""
        # XXX: store files in dictionary
        for f in self.files:
            if f.filename == name:
                return f
        return None
    def getmembers(self):
        """Return all members as ArchiveFile objects."""
        return self.files
    def getnames(self):
        """Return the member filenames."""
        return self.filenames
    def list(self, verbose=True):
        """Print a listing of the archive to stdout."""
        print ('total %d files in %sarchive' % (self.numfiles, (self.solid and 'solid ') or ''))
        if not verbose:
            print ('\n'.join(self.filenames))
            return
        for f in self.files:
            extra = (f.compressed and '%10d ' % (f.compressed)) or ' '
            print ('%10d%s%.8x %s' % (f.size, extra, f.digest, f.filename))
if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.stdout.write("Usage: %s < encrypted 7-Zip files >\n" %
                         sys.argv[0])
        # previously fell through and exited 0; report misuse properly
        sys.exit(1)
    for filename in sys.argv[1:]:
        # Constructing Archive7z prints the "$7z$..." hash line for
        # archives with an encrypted header as a side effect of parsing.
        f = Archive7z(open(filename, 'rb'))
| {
"pile_set_name": "Github"
} |
Network Working Group J. Myers
Request for Comments: 1939 Carnegie Mellon
STD: 53 M. Rose
Obsoletes: 1725 Dover Beach Consulting, Inc.
Category: Standards Track May 1996
Post Office Protocol - Version 3
Status of this Memo
This document specifies an Internet standards track protocol for the
Internet community, and requests discussion and suggestions for
improvements. Please refer to the current edition of the "Internet
Official Protocol Standards" (STD 1) for the standardization state
and status of this protocol. Distribution of this memo is unlimited.
Table of Contents
1. Introduction ................................................ 2
2. A Short Digression .......................................... 2
3. Basic Operation ............................................. 3
4. The AUTHORIZATION State ..................................... 4
QUIT Command ................................................ 5
5. The TRANSACTION State ....................................... 5
STAT Command ................................................ 6
LIST Command ................................................ 6
RETR Command ................................................ 8
DELE Command ................................................ 8
NOOP Command ................................................ 9
RSET Command ................................................ 9
6. The UPDATE State ............................................ 10
QUIT Command ................................................ 10
7. Optional POP3 Commands ...................................... 11
TOP Command ................................................. 11
UIDL Command ................................................ 12
USER Command ................................................ 13
PASS Command ................................................ 14
APOP Command ................................................ 15
8. Scaling and Operational Considerations ...................... 16
9. POP3 Command Summary ........................................ 18
10. Example POP3 Session ....................................... 19
11. Message Format ............................................. 19
12. References ................................................. 20
13. Security Considerations .................................... 20
14. Acknowledgements ........................................... 20
15. Authors' Addresses ......................................... 21
Appendix A. Differences from RFC 1725 .......................... 22
Myers & Rose Standards Track [Page 1]
RFC 1939 POP3 May 1996
Appendix B. Command Index ...................................... 23
1. Introduction
On certain types of smaller nodes in the Internet it is often
impractical to maintain a message transport system (MTS). For
example, a workstation may not have sufficient resources (cycles,
disk space) in order to permit a SMTP server [RFC821] and associated
local mail delivery system to be kept resident and continuously
running. Similarly, it may be expensive (or impossible) to keep a
personal computer interconnected to an IP-style network for long
amounts of time (the node is lacking the resource known as
"connectivity").
Despite this, it is often very useful to be able to manage mail on
these smaller nodes, and they often support a user agent (UA) to aid
the tasks of mail handling. To solve this problem, a node which can
support an MTS entity offers a maildrop service to these less endowed
nodes. The Post Office Protocol - Version 3 (POP3) is intended to
permit a workstation to dynamically access a maildrop on a server
host in a useful fashion. Usually, this means that the POP3 protocol
is used to allow a workstation to retrieve mail that the server is
holding for it.
POP3 is not intended to provide extensive manipulation operations of
mail on the server; normally, mail is downloaded and then deleted. A
more advanced (and complex) protocol, IMAP4, is discussed in
[RFC1730].
For the remainder of this memo, the term "client host" refers to a
host making use of the POP3 service, while the term "server host"
refers to a host which offers the POP3 service.
2. A Short Digression
This memo does not specify how a client host enters mail into the
transport system, although a method consistent with the philosophy of
this memo is presented here:
When the user agent on a client host wishes to enter a message
into the transport system, it establishes an SMTP connection to
its relay host and sends all mail to it. This relay host could
be, but need not be, the POP3 server host for the client host. Of
course, the relay host must accept mail for delivery to arbitrary
      recipient addresses; that functionality is not required of all
SMTP servers.
Myers & Rose Standards Track [Page 2]
RFC 1939 POP3 May 1996
3. Basic Operation
Initially, the server host starts the POP3 service by listening on
TCP port 110. When a client host wishes to make use of the service,
it establishes a TCP connection with the server host. When the
connection is established, the POP3 server sends a greeting. The
client and POP3 server then exchange commands and responses
(respectively) until the connection is closed or aborted.
Commands in the POP3 consist of a case-insensitive keyword, possibly
followed by one or more arguments. All commands are terminated by a
CRLF pair. Keywords and arguments consist of printable ASCII
characters. Keywords and arguments are each separated by a single
SPACE character. Keywords are three or four characters long. Each
argument may be up to 40 characters long.
Responses in the POP3 consist of a status indicator and a keyword
possibly followed by additional information. All responses are
terminated by a CRLF pair. Responses may be up to 512 characters
long, including the terminating CRLF. There are currently two status
indicators: positive ("+OK") and negative ("-ERR"). Servers MUST
send the "+OK" and "-ERR" in upper case.
Responses to certain commands are multi-line. In these cases, which
are clearly indicated below, after sending the first line of the
response and a CRLF, any additional lines are sent, each terminated
by a CRLF pair. When all lines of the response have been sent, a
final line is sent, consisting of a termination octet (decimal code
046, ".") and a CRLF pair. If any line of the multi-line response
begins with the termination octet, the line is "byte-stuffed" by
pre-pending the termination octet to that line of the response.
Hence a multi-line response is terminated with the five octets
"CRLF.CRLF". When examining a multi-line response, the client checks
to see if the line begins with the termination octet. If so and if
octets other than CRLF follow, the first octet of the line (the
termination octet) is stripped away. If so and if CRLF immediately
follows the termination character, then the response from the POP
server is ended and the line containing ".CRLF" is not considered
part of the multi-line response.
A POP3 session progresses through a number of states during its
lifetime. Once the TCP connection has been opened and the POP3
server has sent the greeting, the session enters the AUTHORIZATION
state. In this state, the client must identify itself to the POP3
server. Once the client has successfully done this, the server
acquires resources associated with the client's maildrop, and the
session enters the TRANSACTION state. In this state, the client
requests actions on the part of the POP3 server. When the client has
Myers & Rose Standards Track [Page 3]
RFC 1939 POP3 May 1996
issued the QUIT command, the session enters the UPDATE state. In
this state, the POP3 server releases any resources acquired during
the TRANSACTION state and says goodbye. The TCP connection is then
closed.
A server MUST respond to an unrecognized, unimplemented, or
syntactically invalid command by responding with a negative status
indicator. A server MUST respond to a command issued when the
session is in an incorrect state by responding with a negative status
indicator. There is no general method for a client to distinguish
between a server which does not implement an optional command and a
server which is unwilling or unable to process the command.
A POP3 server MAY have an inactivity autologout timer. Such a timer
MUST be of at least 10 minutes' duration. The receipt of any command
from the client during that interval should suffice to reset the
autologout timer. When the timer expires, the session does NOT enter
the UPDATE state--the server should close the TCP connection without
removing any messages or sending any response to the client.
4. The AUTHORIZATION State
Once the TCP connection has been opened by a POP3 client, the POP3
server issues a one line greeting. This can be any positive
response. An example might be:
S: +OK POP3 server ready
The POP3 session is now in the AUTHORIZATION state. The client must
now identify and authenticate itself to the POP3 server. Two
possible mechanisms for doing this are described in this document,
the USER and PASS command combination and the APOP command. Both
mechanisms are described later in this document. Additional
authentication mechanisms are described in [RFC1734]. While there is
no single authentication mechanism that is required of all POP3
servers, a POP3 server must of course support at least one
authentication mechanism.
Once the POP3 server has determined through the use of any
authentication command that the client should be given access to the
appropriate maildrop, the POP3 server then acquires an exclusive-
access lock on the maildrop, as necessary to prevent messages from
being modified or removed before the session enters the UPDATE state.
If the lock is successfully acquired, the POP3 server responds with a
positive status indicator. The POP3 session now enters the
TRANSACTION state, with no messages marked as deleted. If the
maildrop cannot be opened for some reason (for example, a lock can
not be acquired, the client is denied access to the appropriate
Myers & Rose Standards Track [Page 4]
RFC 1939 POP3 May 1996
maildrop, or the maildrop cannot be parsed), the POP3 server responds
with a negative status indicator. (If a lock was acquired but the
POP3 server intends to respond with a negative status indicator, the
POP3 server must release the lock prior to rejecting the command.)
After returning a negative status indicator, the server may close the
connection. If the server does not close the connection, the client
may either issue a new authentication command and start again, or the
client may issue the QUIT command.
After the POP3 server has opened the maildrop, it assigns a message-
number to each message, and notes the size of each message in octets.
The first message in the maildrop is assigned a message-number of
"1", the second is assigned "2", and so on, so that the nth message
in a maildrop is assigned a message-number of "n". In POP3 commands
and responses, all message-numbers and message sizes are expressed in
base-10 (i.e., decimal).
Here is the summary for the QUIT command when used in the
AUTHORIZATION state:
QUIT
Arguments: none
Restrictions: none
Possible Responses:
+OK
Examples:
C: QUIT
S: +OK dewey POP3 server signing off
5. The TRANSACTION State
Once the client has successfully identified itself to the POP3 server
and the POP3 server has locked and opened the appropriate maildrop,
the POP3 session is now in the TRANSACTION state. The client may now
issue any of the following POP3 commands repeatedly. After each
command, the POP3 server issues a response. Eventually, the client
issues the QUIT command and the POP3 session enters the UPDATE state.
Myers & Rose Standards Track [Page 5]
RFC 1939 POP3 May 1996
Here are the POP3 commands valid in the TRANSACTION state:
STAT
Arguments: none
Restrictions:
may only be given in the TRANSACTION state
Discussion:
The POP3 server issues a positive response with a line
containing information for the maildrop. This line is
called a "drop listing" for that maildrop.
In order to simplify parsing, all POP3 servers are
required to use a certain format for drop listings. The
positive response consists of "+OK" followed by a single
space, the number of messages in the maildrop, a single
space, and the size of the maildrop in octets. This memo
makes no requirement on what follows the maildrop size.
Minimal implementations should just end that line of the
response with a CRLF pair. More advanced implementations
may include other information.
NOTE: This memo STRONGLY discourages implementations
from supplying additional information in the drop
listing. Other, optional, facilities are discussed
later on which permit the client to parse the messages
in the maildrop.
Note that messages marked as deleted are not counted in
either total.
Possible Responses:
+OK nn mm
Examples:
C: STAT
S: +OK 2 320
LIST [msg]
Arguments:
a message-number (optional), which, if present, may NOT
refer to a message marked as deleted
Myers & Rose Standards Track [Page 6]
RFC 1939 POP3 May 1996
Restrictions:
may only be given in the TRANSACTION state
Discussion:
If an argument was given and the POP3 server issues a
positive response with a line containing information for
that message. This line is called a "scan listing" for
that message.
If no argument was given and the POP3 server issues a
positive response, then the response given is multi-line.
After the initial +OK, for each message in the maildrop,
the POP3 server responds with a line containing
information for that message. This line is also called a
"scan listing" for that message. If there are no
messages in the maildrop, then the POP3 server responds
with no scan listings--it issues a positive response
followed by a line containing a termination octet and a
CRLF pair.
In order to simplify parsing, all POP3 servers are
required to use a certain format for scan listings. A
scan listing consists of the message-number of the
message, followed by a single space and the exact size of
the message in octets. Methods for calculating the exact
size of the message are described in the "Message Format"
section below. This memo makes no requirement on what
follows the message size in the scan listing. Minimal
implementations should just end that line of the response
with a CRLF pair. More advanced implementations may
include other information, as parsed from the message.
NOTE: This memo STRONGLY discourages implementations
from supplying additional information in the scan
listing. Other, optional, facilities are discussed
later on which permit the client to parse the messages
in the maildrop.
Note that messages marked as deleted are not listed.
Possible Responses:
+OK scan listing follows
-ERR no such message
Examples:
C: LIST
S: +OK 2 messages (320 octets)
S: 1 120
Myers & Rose Standards Track [Page 7]
RFC 1939 POP3 May 1996
S: 2 200
S: .
...
C: LIST 2
S: +OK 2 200
...
C: LIST 3
S: -ERR no such message, only 2 messages in maildrop
RETR msg
Arguments:
a message-number (required) which may NOT refer to a
message marked as deleted
Restrictions:
may only be given in the TRANSACTION state
Discussion:
If the POP3 server issues a positive response, then the
response given is multi-line. After the initial +OK, the
POP3 server sends the message corresponding to the given
message-number, being careful to byte-stuff the termination
character (as with all multi-line responses).
Possible Responses:
+OK message follows
-ERR no such message
Examples:
C: RETR 1
S: +OK 120 octets
S: <the POP3 server sends the entire message here>
S: .
DELE msg
Arguments:
a message-number (required) which may NOT refer to a
message marked as deleted
Restrictions:
may only be given in the TRANSACTION state
Myers & Rose Standards Track [Page 8]
RFC 1939 POP3 May 1996
Discussion:
The POP3 server marks the message as deleted. Any future
reference to the message-number associated with the message
in a POP3 command generates an error. The POP3 server does
not actually delete the message until the POP3 session
enters the UPDATE state.
Possible Responses:
+OK message deleted
-ERR no such message
Examples:
C: DELE 1
S: +OK message 1 deleted
...
C: DELE 2
S: -ERR message 2 already deleted
NOOP
Arguments: none
Restrictions:
may only be given in the TRANSACTION state
Discussion:
The POP3 server does nothing, it merely replies with a
positive response.
Possible Responses:
+OK
Examples:
C: NOOP
S: +OK
RSET
Arguments: none
Restrictions:
may only be given in the TRANSACTION state
Discussion:
If any messages have been marked as deleted by the POP3
server, they are unmarked. The POP3 server then replies
Myers & Rose Standards Track [Page 9]
RFC 1939 POP3 May 1996
with a positive response.
Possible Responses:
+OK
Examples:
C: RSET
S: +OK maildrop has 2 messages (320 octets)
6. The UPDATE State
When the client issues the QUIT command from the TRANSACTION state,
the POP3 session enters the UPDATE state. (Note that if the client
issues the QUIT command from the AUTHORIZATION state, the POP3
session terminates but does NOT enter the UPDATE state.)
If a session terminates for some reason other than a client-issued
QUIT command, the POP3 session does NOT enter the UPDATE state and
MUST not remove any messages from the maildrop.
QUIT
Arguments: none
Restrictions: none
Discussion:
The POP3 server removes all messages marked as deleted
from the maildrop and replies as to the status of this
operation. If there is an error, such as a resource
shortage, encountered while removing messages, the
maildrop may result in having some or none of the messages
marked as deleted be removed. In no case may the server
remove any messages not marked as deleted.
Whether the removal was successful or not, the server
then releases any exclusive-access lock on the maildrop
and closes the TCP connection.
Possible Responses:
+OK
-ERR some deleted messages not removed
Examples:
C: QUIT
S: +OK dewey POP3 server signing off (maildrop empty)
...
C: QUIT
Myers & Rose Standards Track [Page 10]
RFC 1939 POP3 May 1996
S: +OK dewey POP3 server signing off (2 messages left)
...
7. Optional POP3 Commands
The POP3 commands discussed above must be supported by all minimal
implementations of POP3 servers.
The optional POP3 commands described below permit a POP3 client
greater freedom in message handling, while preserving a simple POP3
server implementation.
NOTE: This memo STRONGLY encourages implementations to support
these commands in lieu of developing augmented drop and scan
listings. In short, the philosophy of this memo is to put
intelligence in the part of the POP3 client and not the POP3
server.
TOP msg n
Arguments:
a message-number (required) which may NOT refer to a
message marked as deleted, and a non-negative number
of lines (required)
Restrictions:
may only be given in the TRANSACTION state
Discussion:
If the POP3 server issues a positive response, then the
response given is multi-line. After the initial +OK, the
POP3 server sends the headers of the message, the blank
line separating the headers from the body, and then the
number of lines of the indicated message's body, being
careful to byte-stuff the termination character (as with
all multi-line responses).
Note that if the number of lines requested by the POP3
client is greater than the number of lines in the
body, then the POP3 server sends the entire message.
Possible Responses:
+OK top of message follows
-ERR no such message
Examples:
C: TOP 1 10
S: +OK
Myers & Rose Standards Track [Page 11]
RFC 1939 POP3 May 1996
S: <the POP3 server sends the headers of the
message, a blank line, and the first 10 lines
of the body of the message>
S: .
...
C: TOP 100 3
S: -ERR no such message
UIDL [msg]
Arguments:
a message-number (optional), which, if present, may NOT
refer to a message marked as deleted
Restrictions:
may only be given in the TRANSACTION state.
Discussion:
If an argument was given and the POP3 server issues a positive
response with a line containing information for that message.
This line is called a "unique-id listing" for that message.
If no argument was given and the POP3 server issues a positive
response, then the response given is multi-line. After the
initial +OK, for each message in the maildrop, the POP3 server
responds with a line containing information for that message.
This line is called a "unique-id listing" for that message.
In order to simplify parsing, all POP3 servers are required to
use a certain format for unique-id listings. A unique-id
listing consists of the message-number of the message,
followed by a single space and the unique-id of the message.
No information follows the unique-id in the unique-id listing.
The unique-id of a message is an arbitrary server-determined
string, consisting of one to 70 characters in the range 0x21
to 0x7E, which uniquely identifies a message within a
maildrop and which persists across sessions. This
persistence is required even if a session ends without
entering the UPDATE state. The server should never reuse a
unique-id in a given maildrop, for as long as the entity
using the unique-id exists.
Note that messages marked as deleted are not listed.
While it is generally preferable for server implementations
to store arbitrarily assigned unique-ids in the maildrop,
Myers & Rose Standards Track [Page 12]
RFC 1939 POP3 May 1996
this specification is intended to permit unique-ids to be
calculated as a hash of the message. Clients should be able
to handle a situation where two identical copies of a
message in a maildrop have the same unique-id.
Possible Responses:
+OK unique-id listing follows
-ERR no such message
Examples:
C: UIDL
S: +OK
S: 1 whqtswO00WBw418f9t5JxYwZ
S: 2 QhdPYR:00WBw1Ph7x7
S: .
...
C: UIDL 2
S: +OK 2 QhdPYR:00WBw1Ph7x7
...
C: UIDL 3
S: -ERR no such message, only 2 messages in maildrop
USER name
Arguments:
a string identifying a mailbox (required), which is of
significance ONLY to the server
Restrictions:
may only be given in the AUTHORIZATION state after the POP3
greeting or after an unsuccessful USER or PASS command
Discussion:
To authenticate using the USER and PASS command
combination, the client must first issue the USER
command. If the POP3 server responds with a positive
status indicator ("+OK"), then the client may issue
either the PASS command to complete the authentication,
or the QUIT command to terminate the POP3 session. If
the POP3 server responds with a negative status indicator
("-ERR") to the USER command, then the client may either
issue a new authentication command or may issue the QUIT
command.
The server may return a positive response even though no
such mailbox exists. The server may return a negative
response if the mailbox exists, but does not permit plaintext
Myers & Rose Standards Track [Page 13]
RFC 1939 POP3 May 1996
password authentication.
Possible Responses:
+OK name is a valid mailbox
-ERR never heard of mailbox name
Examples:
C: USER frated
S: -ERR sorry, no mailbox for frated here
...
C: USER mrose
S: +OK mrose is a real hoopy frood
PASS string
Arguments:
a server/mailbox-specific password (required)
Restrictions:
may only be given in the AUTHORIZATION state immediately
after a successful USER command
Discussion:
When the client issues the PASS command, the POP3 server
uses the argument pair from the USER and PASS commands to
determine if the client should be given access to the
appropriate maildrop.
Since the PASS command has exactly one argument, a POP3
server may treat spaces in the argument as part of the
password, instead of as argument separators.
Possible Responses:
+OK maildrop locked and ready
-ERR invalid password
-ERR unable to lock maildrop
Examples:
C: USER mrose
S: +OK mrose is a real hoopy frood
C: PASS secret
S: -ERR maildrop already locked
...
C: USER mrose
S: +OK mrose is a real hoopy frood
C: PASS secret
S: +OK mrose's maildrop has 2 messages (320 octets)
Myers & Rose Standards Track [Page 14]
RFC 1939 POP3 May 1996
APOP name digest
Arguments:
a string identifying a mailbox and a MD5 digest string
(both required)
Restrictions:
may only be given in the AUTHORIZATION state after the POP3
greeting or after an unsuccessful USER or PASS command
Discussion:
Normally, each POP3 session starts with a USER/PASS
exchange. This results in a server/user-id specific
password being sent in the clear on the network. For
intermittent use of POP3, this may not introduce a sizable
risk. However, many POP3 client implementations connect to
the POP3 server on a regular basis -- to check for new
mail. Further the interval of session initiation may be on
the order of five minutes. Hence, the risk of password
capture is greatly enhanced.
An alternate method of authentication is required which
provides for both origin authentication and replay
protection, but which does not involve sending a password
in the clear over the network. The APOP command provides
this functionality.
A POP3 server which implements the APOP command will
include a timestamp in its banner greeting. The syntax of
the timestamp corresponds to the `msg-id' in [RFC822], and
MUST be different each time the POP3 server issues a banner
greeting. For example, on a UNIX implementation in which a
separate UNIX process is used for each instance of a POP3
server, the syntax of the timestamp might be:
<process-ID.clock@hostname>
where `process-ID' is the decimal value of the process's
PID, clock is the decimal value of the system clock, and
hostname is the fully-qualified domain-name corresponding
to the host where the POP3 server is running.
The POP3 client makes note of this timestamp, and then
issues the APOP command. The `name' parameter has
identical semantics to the `name' parameter of the USER
command. The `digest' parameter is calculated by applying
the MD5 algorithm [RFC1321] to a string consisting of the
timestamp (including angle-brackets) followed by a shared
Myers & Rose Standards Track [Page 15]
RFC 1939 POP3 May 1996
secret. This shared secret is a string known only to the
POP3 client and server. Great care should be taken to
prevent unauthorized disclosure of the secret, as knowledge
of the secret will allow any entity to successfully
masquerade as the named user. The `digest' parameter
itself is a 16-octet value which is sent in hexadecimal
format, using lower-case ASCII characters.
When the POP3 server receives the APOP command, it verifies
the digest provided. If the digest is correct, the POP3
server issues a positive response, and the POP3 session
enters the TRANSACTION state. Otherwise, a negative
response is issued and the POP3 session remains in the
AUTHORIZATION state.
Note that as the length of the shared secret increases, so
does the difficulty of deriving it. As such, shared
secrets should be long strings (considerably longer than
the 8-character example shown below).
Possible Responses:
+OK maildrop locked and ready
-ERR permission denied
Examples:
S: +OK POP3 server ready <[email protected]>
C: APOP mrose c4c9334bac560ecc979e58001b3e22fb
S: +OK maildrop has 1 message (369 octets)
In this example, the shared secret is the string `tan-
staaf'. Hence, the MD5 algorithm is applied to the string
<[email protected]>tanstaaf
which produces a digest value of
c4c9334bac560ecc979e58001b3e22fb
8. Scaling and Operational Considerations
Since some of the optional features described above were added to the
POP3 protocol, experience has accumulated in using them in large-
scale commercial post office operations where most of the users are
unrelated to each other. In these situations and others, users and
vendors of POP3 clients have discovered that the combination of using
the UIDL command and not issuing the DELE command can provide a weak
version of the "maildrop as semi-permanent repository" functionality
normally associated with IMAP. Of course the other capabilities of
Myers & Rose Standards Track [Page 16]
RFC 1939 POP3 May 1996
IMAP, such as polling an existing connection for newly arrived
messages and supporting multiple folders on the server, are not
present in POP3.
When these facilities are used in this way by casual users, there has
been a tendency for already-read messages to accumulate on the server
without bound. This is clearly an undesirable behavior pattern from
the standpoint of the server operator. This situation is aggravated
by the fact that the limited capabilities of the POP3 do not permit
efficient handling of maildrops which have hundreds or thousands of
messages.
Consequently, it is recommended that operators of large-scale multi-
user servers, especially ones in which the user's only access to the
maildrop is via POP3, consider such options as:
* Imposing a per-user maildrop storage quota or the like.
A disadvantage to this option is that accumulation of messages may
result in the user's inability to receive new ones into the
maildrop. Sites which choose this option should be sure to inform
users of impending or current exhaustion of quota, perhaps by
inserting an appropriate message into the user's maildrop.
* Enforce a site policy regarding mail retention on the server.
Sites are free to establish local policy regarding the storage and
retention of messages on the server, both read and unread. For
example, a site might delete unread messages from the server after
60 days and delete read messages after 7 days. Such message
deletions are outside the scope of the POP3 protocol and are not
considered a protocol violation.
Server operators enforcing message deletion policies should take
care to make all users aware of the policies in force.
Clients must not assume that a site policy will automate message
deletions, and should continue to explicitly delete messages using
the DELE command when appropriate.
It should be noted that enforcing site message deletion policies
may be confusing to the user community, since their POP3 client
may contain configuration options to leave mail on the server
which will not in fact be supported by the server.
One special case of a site policy is that messages may only be
downloaded once from the server, and are deleted after this has
been accomplished. This could be implemented in POP3 server
Myers & Rose Standards Track [Page 17]
RFC 1939 POP3 May 1996
software by the following mechanism: "following a POP3 login by a
client which was ended by a QUIT, delete all messages downloaded
during the session with the RETR command". It is important not to
delete messages in the event of abnormal connection termination
(ie, if no QUIT was received from the client) because the client
may not have successfully received or stored the messages.
Servers implementing a download-and-delete policy may also wish to
disable or limit the optional TOP command, since it could be used
as an alternate mechanism to download entire messages.
9. POP3 Command Summary
Minimal POP3 Commands:
USER name valid in the AUTHORIZATION state
PASS string
QUIT
STAT valid in the TRANSACTION state
LIST [msg]
RETR msg
DELE msg
NOOP
RSET
QUIT
Optional POP3 Commands:
APOP name digest valid in the AUTHORIZATION state
TOP msg n valid in the TRANSACTION state
UIDL [msg]
POP3 Replies:
+OK
-ERR
Note that with the exception of the STAT, LIST, and UIDL commands,
the reply given by the POP3 server to any command is significant
only to "+OK" and "-ERR". Any text occurring after this reply
may be ignored by the client.
Myers & Rose Standards Track [Page 18]
RFC 1939 POP3 May 1996
10. Example POP3 Session
S: <wait for connection on TCP port 110>
C: <open connection>
S: +OK POP3 server ready <[email protected]>
C: APOP mrose c4c9334bac560ecc979e58001b3e22fb
S: +OK mrose's maildrop has 2 messages (320 octets)
C: STAT
S: +OK 2 320
C: LIST
S: +OK 2 messages (320 octets)
S: 1 120
S: 2 200
S: .
C: RETR 1
S: +OK 120 octets
S: <the POP3 server sends message 1>
S: .
C: DELE 1
S: +OK message 1 deleted
C: RETR 2
S: +OK 200 octets
S: <the POP3 server sends message 2>
S: .
C: DELE 2
S: +OK message 2 deleted
C: QUIT
S: +OK dewey POP3 server signing off (maildrop empty)
C: <close connection>
S: <wait for next connection>
11. Message Format
All messages transmitted during a POP3 session are assumed to conform
to the standard for the format of Internet text messages [RFC822].
It is important to note that the octet count for a message on the
server host may differ from the octet count assigned to that message
due to local conventions for designating end-of-line. Usually,
during the AUTHORIZATION state of the POP3 session, the POP3 server
can calculate the size of each message in octets when it opens the
maildrop. For example, if the POP3 server host internally represents
end-of-line as a single character, then the POP3 server simply counts
each occurrence of this character in a message as two octets. Note
that lines in the message which start with the termination octet need
not (and must not) be counted twice, since the POP3 client will
remove all byte-stuffed termination characters when it receives a
multi-line response.
Myers & Rose Standards Track [Page 19]
RFC 1939 POP3 May 1996
12. References
[RFC821] Postel, J., "Simple Mail Transfer Protocol", STD 10, RFC
821, USC/Information Sciences Institute, August 1982.
[RFC822] Crocker, D., "Standard for the Format of ARPA-Internet Text
Messages", STD 11, RFC 822, University of Delaware, August 1982.
[RFC1321] Rivest, R., "The MD5 Message-Digest Algorithm", RFC 1321,
MIT Laboratory for Computer Science, April 1992.
[RFC1730] Crispin, M., "Internet Message Access Protocol - Version
4", RFC 1730, University of Washington, December 1994.
[RFC1734] Myers, J., "POP3 AUTHentication command", RFC 1734,
Carnegie Mellon, December 1994.
13. Security Considerations
It is conjectured that use of the APOP command provides origin
identification and replay protection for a POP3 session.
Accordingly, a POP3 server which implements both the PASS and APOP
commands should not allow both methods of access for a given user;
that is, for a given mailbox name, either the USER/PASS command
sequence or the APOP command is allowed, but not both.
Further, note that as the length of the shared secret increases, so
does the difficulty of deriving it.
Servers that answer -ERR to the USER command are giving potential
attackers clues about which names are valid.
Use of the PASS command sends passwords in the clear over the
network.
Use of the RETR and TOP commands sends mail in the clear over the
network.
Otherwise, security issues are not discussed in this memo.
14. Acknowledgements
The POP family has a long and checkered history. Although primarily
a minor revision to RFC 1460, POP3 is based on the ideas presented in
RFCs 918, 937, and 1081.
In addition, Alfred Grimstad, Keith McCloghrie, and Neil Ostroff
provided significant comments on the APOP command.
Myers & Rose Standards Track [Page 20]
RFC 1939 POP3 May 1996
15. Authors' Addresses
John G. Myers
Carnegie-Mellon University
5000 Forbes Ave
Pittsburgh, PA 15213
EMail: [email protected]
Marshall T. Rose
Dover Beach Consulting, Inc.
420 Whisman Court
Mountain View, CA 94043-2186
EMail: [email protected]
Myers & Rose Standards Track [Page 21]
RFC 1939 POP3 May 1996
Appendix A. Differences from RFC 1725
This memo is a revision to RFC 1725, a Draft Standard. It makes the
following changes from that document:
- clarifies that command keywords are case insensitive.
- specifies that servers must send "+OK" and "-ERR" in
upper case.
- specifies that the initial greeting is a positive response,
instead of any string which should be a positive response.
- clarifies behavior for unimplemented commands.
- makes the USER and PASS commands optional.
- clarified the set of possible responses to the USER command.
- reverses the order of the examples in the USER and PASS
commands, to reduce confusion.
- clarifies that the PASS command may only be given immediately
after a successful USER command.
- clarified the persistence requirements of UIDs and added some
implementation notes.
- specifies a UID length limitation of one to 70 octets.
- specifies a status indicator length limitation
of 512 octets, including the CRLF.
- clarifies that LIST with no arguments on an empty mailbox
returns success.
- adds a reference from the LIST command to the Message Format
section
- clarifies the behavior of QUIT upon failure
- clarifies the security section to not imply the use of the
USER command with the APOP command.
- adds references to RFCs 1730 and 1734
- clarifies the method by which a UA may enter mail into the
transport system.
Myers & Rose Standards Track [Page 22]
RFC 1939 POP3 May 1996
- clarifies that the second argument to the TOP command is a
number of lines.
- changes the suggestion in the Security Considerations section
for a server to not accept both PASS and APOP for a given user
from a "must" to a "should".
- adds a section on scaling and operational considerations
Appendix B. Command Index
APOP ....................................................... 15
DELE ....................................................... 8
LIST ....................................................... 6
NOOP ....................................................... 9
PASS ....................................................... 14
QUIT ....................................................... 5
QUIT ....................................................... 10
RETR ....................................................... 8
RSET ....................................................... 9
STAT ....................................................... 6
TOP ........................................................ 11
UIDL ....................................................... 12
USER ....................................................... 13
Myers & Rose Standards Track [Page 23]
| {
"pile_set_name": "Github"
} |
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="../../assets/img/favicon-144.png">
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="../../assets/img/favicon-144.png">
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="../../assets/img/favicon-72.png">
<link rel="apple-touch-icon-precomposed" href="../../assets/img/favicon-32.png">
<link rel="shortcut icon" href="../../assets/img/favicon-32.png">
<link rel="stylesheet" href="../../assets/css/vk.css"/>
<link rel="stylesheet" href="../../assets/css/prism.css"/>
<vk-title>vkGetPhysicalDeviceExternalFenceProperties | NVK</vk-title>
<vk-centered>
<vk-navigation>
<vk-search-title>Search</vk-search-title>
<vk-search>
<input type="text" id="search" autocomplete="off" />
<vk-search-results>
<ol id="search-list">
<li id="no-search-results">No Results</li>
</ol>
</vk-search-results>
</vk-search>
<vk-section-title style="margin-top: 1em;">Categories</vk-section-title>
<vk-categories></vk-categories>
</vk-navigation>
<vk-struct>
<vk-name>vkGetPhysicalDeviceExternalFenceProperties</vk-name>
<vk-description>Function for querying external fence handle capabilities.</vk-description>
<vk-section-title>Syntax</vk-section-title>
<vk-syntax>
<pre><code class="language-js">void vkGetPhysicalDeviceExternalFenceProperties();
</code></pre>
</vk-syntax>
<vk-section-title>Parameters</vk-section-title>
<vk-properties>
<vk-property-entry>
<vk-property-parameter>physicalDevice</vk-property-parameter>
<vk-property-type type="object"><a href="../handles/VkPhysicalDevice.html">VkPhysicalDevice</a></vk-property-type>
<vk-property-description> is the physical device from which to query the fence capabilities.</vk-property-description>
</vk-property-entry>
<vk-property-entry>
<vk-property-parameter>pExternalFenceInfo</vk-property-parameter>
<vk-property-type type="object"><a href="../structs/VkPhysicalDeviceExternalFenceInfo.html">VkPhysicalDeviceExternalFenceInfo</a></vk-property-type>
<vk-property-description> is a reference to a <b><a href="../structs/VkPhysicalDeviceExternalFenceInfo.html">VkPhysicalDeviceExternalFenceInfo</a></b> structure describing the parameters that would be consumed by <b><a href="../calls/vkCreateFence.html">vkCreateFence</a></b>.</vk-property-description>
</vk-property-entry>
<vk-property-entry>
<vk-property-parameter>pExternalFenceProperties</vk-property-parameter>
<vk-property-type type="object"><a href="../structs/VkExternalFenceProperties.html">VkExternalFenceProperties</a></vk-property-type>
<vk-property-description> is a reference to a <b><a href="../structs/VkExternalFenceProperties.html">VkExternalFenceProperties</a></b> structure in which capabilities are returned.</vk-property-description>
</vk-property-entry>
</vk-properties>
</vk-struct>
</vk-centered>
<script>
const IS_ROOT = false;
</script>
<script type="text/javascript" src="../../assets/js/prism.min.js"></script>
<script type="text/javascript" src="../../assets/js/index.js"></script>
| {
"pile_set_name": "Github"
} |
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
class CreateDataTypesTable extends Migration
{
    /**
     * Run the migrations.
     *
     * Creates two tables:
     *  - `data_types`: one row per manageable data type (name, slugs,
     *    display labels, icon, backing Eloquent model, description).
     *  - `data_rows`: one row per field belonging to a data type, with
     *    per-field flags controlling where the field appears
     *    (browse/read/edit/add/delete views).
     *
     * @return void
     */
    public function up()
    {
        // Parent table: describes each data type managed by the admin UI.
        Schema::create('data_types', function (Blueprint $table) {
            $table->increments('id');
            $table->string('name')->unique();
            $table->string('slug')->unique();
            $table->string('display_name_singular');
            $table->string('display_name_plural');
            $table->string('icon')->nullable();
            $table->string('model_name')->nullable();
            $table->string('description')->nullable();
            // When true, CRUD permissions are auto-generated for this type.
            $table->boolean('generate_permissions')->default(false);
            $table->timestamps();
        });
        // Child table: the individual fields of each data type.
        Schema::create('data_rows', function (Blueprint $table) {
            $table->increments('id');
            // FK to data_types.id (constraint declared below).
            $table->integer('data_type_id')->unsigned();
            $table->string('field');
            $table->string('type');
            $table->string('display_name');
            $table->boolean('required')->default(false);
            // Visibility flags for each admin view of this field.
            $table->boolean('browse')->default(true);
            $table->boolean('read')->default(true);
            $table->boolean('edit')->default(true);
            $table->boolean('add')->default(true);
            $table->boolean('delete')->default(true);
            // Free-form extra configuration (e.g. JSON options) for the field.
            $table->text('details')->nullable();
            // Cascade so rows are updated/removed together with their data type.
            $table
                ->foreign('data_type_id')
                ->references('id')
                ->on('data_types')
                ->onUpdate('cascade')
                ->onDelete('cascade');
        });
    }
    /**
     * Reverse the migrations.
     *
     * Drops the child table (`data_rows`) before the parent (`data_types`)
     * so the foreign-key constraint does not block the drop.
     *
     * @return void
     */
    public function down()
    {
        Schema::dropIfExists('data_rows');
        Schema::dropIfExists('data_types');
    }
}
| {
"pile_set_name": "Github"
} |
https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10_estimator/
CIFAR-10 is a common benchmark in machine learning for image recognition.
http://www.cs.toronto.edu/~kriz/cifar.html
Code in this directory focuses on how to use TensorFlow Estimators to train and
evaluate a CIFAR-10 ResNet model on:
* A single host with one CPU;
* A single host with multiple GPUs;
* Multiple hosts with CPU or multiple GPUs;
Before trying to run the model we highly encourage you to read all the README.
## Prerequisite
1. [Install](https://www.tensorflow.org/install/) TensorFlow version 1.2.1 or
later.
2. Download the CIFAR-10 dataset and generate TFRecord files using the provided
script. The script and associated command below will download the CIFAR-10
dataset and then generate a TFRecord for the training, validation, and
evaluation datasets.
```shell
python generate_cifar10_tfrecords.py --data-dir=${PWD}/cifar-10-data
```
After running the command above, you should see the following files in the
--data-dir (```ls -R cifar-10-data```):
* train.tfrecords
* validation.tfrecords
* eval.tfrecords
## Training on a single machine with GPUs or CPU
Run the training on CPU only. After training, it runs the evaluation.
```
python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
--job-dir=/tmp/cifar10 \
--num-gpus=0 \
--train-steps=1000
```
Run the model on 2 GPUs using CPU as parameter server. After training, it runs
the evaluation.
```
python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
--job-dir=/tmp/cifar10 \
--num-gpus=2 \
--train-steps=1000
```
Run the model on 2 GPUs using GPU as parameter server.
It will run an experiment, which for a local setting basically means it will
stop training a couple of times to perform evaluation.
```
python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
--job-dir=/tmp/cifar10 \
--variable-strategy GPU \
--num-gpus=2 \
```
There are more command line flags to play with; run
`python cifar10_main.py --help` for details.
## Run distributed training
### (Optional) Running on Google Cloud Machine Learning Engine
This example can be run on Google Cloud Machine Learning Engine (ML Engine),
which will configure the environment and take care of running workers,
parameters servers, and masters in a fault tolerant way.
To install the command line tool, and set up a project and billing, see the
quickstart [here](https://cloud.google.com/ml-engine/docs/quickstarts/command-line).
You'll also need a Google Cloud Storage bucket for the data. If you followed the
instructions above, you can just run:
```
MY_BUCKET=gs://<my-bucket-name>
gsutil cp -r ${PWD}/cifar-10-data $MY_BUCKET/
```
Then run the following command from the `tutorials/image` directory of this
repository (the parent directory of this README):
```
gcloud ml-engine jobs submit training cifarmultigpu \
--runtime-version 1.2 \
--job-dir=$MY_BUCKET/model_dirs/cifarmultigpu \
--config cifar10_estimator/cmle_config.yaml \
--package-path cifar10_estimator/ \
--module-name cifar10_estimator.cifar10_main \
-- \
--data-dir=$MY_BUCKET/cifar-10-data \
--num-gpus=4 \
--train-steps=1000
```
### Set TF_CONFIG
Considering that you already have multiple hosts configured, all you need is a
`TF_CONFIG` environment variable on each host. You can set up the hosts manually
or check [tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) for
instructions about how to set up a Cluster.
The `TF_CONFIG` will be used by the `RunConfig` to know the existing hosts and
their task: `master`, `ps` or `worker`.
Here's an example of `TF_CONFIG`.
```python
cluster = {'master': ['master-ip:8000'],
'ps': ['ps-ip:8000'],
'worker': ['worker-ip:8000']}
TF_CONFIG = json.dumps(
{'cluster': cluster,
     'task': {'type': 'master', 'index': 0},
'model_dir': 'gs://<bucket_path>/<dir_path>',
'environment': 'cloud'
})
```
*Cluster*
A cluster spec, which is basically a dictionary that describes all of the tasks
in the cluster. More about it [here](https://www.tensorflow.org/deploy/distributed).
In this cluster spec we are defining a cluster with 1 master, 1 ps and 1 worker.
* `ps`: saves the parameters among all workers. All workers can
read/write/update the parameters for model via ps. As some models are
extremely large the parameters are shared among the ps (each ps stores a
subset).
* `worker`: does the training.
* `master`: basically a special worker, it does training, but also restores and
saves checkpoints and do evaluation.
*Task*
The Task defines what is the role of the current node, for this example the node
is the master on index 0 on the cluster spec, the task will be different for
each node. An example of the `TF_CONFIG` for a worker would be:
```python
cluster = {'master': ['master-ip:8000'],
'ps': ['ps-ip:8000'],
'worker': ['worker-ip:8000']}
TF_CONFIG = json.dumps(
{'cluster': cluster,
     'task': {'type': 'worker', 'index': 0},
'model_dir': 'gs://<bucket_path>/<dir_path>',
'environment': 'cloud'
})
```
*Model_dir*
This is the path where the master will save the checkpoints, graph and
TensorBoard files. For a multi host environment you may want to use a
Distributed File System, Google Storage and DFS are supported.
*Environment*
By default the environment is *local*; for a distributed setting we need to
change it to *cloud*.
### Running script
Once you have a `TF_CONFIG` configured properly on each host you're ready to run
on distributed settings.
#### Master
Run this on master:
Runs an Experiment in sync mode on 4 GPUs using CPU as parameter server for
40000 steps. It will run evaluation a couple of times during training. The
num_workers argument is used only to update the learning rate correctly. Make
sure the model_dir is the same as defined on the TF_CONFIG.
```shell
python cifar10_main.py --data-dir=gs://path/cifar-10-data \
--job-dir=gs://path/model_dir/ \
--num-gpus=4 \
--train-steps=40000 \
--sync \
--num-workers=2
```
*Output:*
```shell
INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'master', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd16fb2be10>, '_model_dir': 'gs://path/model_dir/', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_session_config': intra_op_parallelism_threads: 1
gpu_options {
}
allow_soft_placement: true
, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1, '_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1.0
}
, '_evaluation_master': '', '_master': u'grpc://master-ip:8000'}
...
2017-08-01 19:59:26.496208: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 0 with properties:
name: Tesla K80
major: 3 minor: 7 memoryClockRate (GHz) 0.8235
pciBusID 0000:00:04.0
Total memory: 11.17GiB
Free memory: 11.09GiB
2017-08-01 19:59:26.775660: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 1 with properties:
name: Tesla K80
major: 3 minor: 7 memoryClockRate (GHz) 0.8235
pciBusID 0000:00:05.0
Total memory: 11.17GiB
Free memory: 11.10GiB
...
2017-08-01 19:59:29.675171: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=1; total_num_replicas=1
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from gs://path/model_dir/model.ckpt-0
2017-08-01 19:59:37.560775: I tensorflow/core/distributed_runtime/master_session.cc:999] Start master session 156fcb55fe6648d6 with config:
intra_op_parallelism_threads: 1
gpu_options {
per_process_gpu_memory_fraction: 1
}
allow_soft_placement: true
INFO:tensorflow:Saving checkpoints for 1 into gs://path/model_dir/model.ckpt.
INFO:tensorflow:loss = 1.20682, step = 1
INFO:tensorflow:loss = 1.20682, learning_rate = 0.1
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=2; total_num_replicas=2
INFO:tensorflow:Starting evaluation at 2017-08-01-20:00:14
2017-08-01 20:00:15.745881: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0)
2017-08-01 20:00:15.745949: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:1) -> (device: 1, name: Tesla K80, pci bus id: 0000:00:05.0)
2017-08-01 20:00:15.745958: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:2) -> (device: 2, name: Tesla K80, pci bus id: 0000:00:06.0)
2017-08-01 20:00:15.745964: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:3) -> (device: 3, name: Tesla K80, pci bus id: 0000:00:07.0)
2017-08-01 20:00:15.745969: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:4) -> (device: 4, name: Tesla K80, pci bus id: 0000:00:08.0)
2017-08-01 20:00:15.745975: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:5) -> (device: 5, name: Tesla K80, pci bus id: 0000:00:09.0)
2017-08-01 20:00:15.745987: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:6) -> (device: 6, name: Tesla K80, pci bus id: 0000:00:0a.0)
2017-08-01 20:00:15.745997: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:7) -> (device: 7, name: Tesla K80, pci bus id: 0000:00:0b.0)
INFO:tensorflow:Restoring parameters from gs://path/model_dir/model.ckpt-10023
INFO:tensorflow:Evaluation [1/100]
INFO:tensorflow:Evaluation [2/100]
INFO:tensorflow:Evaluation [3/100]
INFO:tensorflow:Evaluation [4/100]
INFO:tensorflow:Evaluation [5/100]
INFO:tensorflow:Evaluation [6/100]
INFO:tensorflow:Evaluation [7/100]
INFO:tensorflow:Evaluation [8/100]
INFO:tensorflow:Evaluation [9/100]
INFO:tensorflow:Evaluation [10/100]
INFO:tensorflow:Evaluation [11/100]
INFO:tensorflow:Evaluation [12/100]
INFO:tensorflow:Evaluation [13/100]
...
INFO:tensorflow:Evaluation [100/100]
INFO:tensorflow:Finished evaluation at 2017-08-01-20:00:31
INFO:tensorflow:Saving dict for global step 1: accuracy = 0.0994, global_step = 1, loss = 630.425
```
#### Worker
Run this on worker:
Runs an Experiment in sync mode on 4 GPUs using CPU as parameter server for
40000 steps. It will run evaluation a couple of times during training. Make sure
the model_dir is the same as defined on the TF_CONFIG.
```shell
python cifar10_main.py --data-dir=gs://path/cifar-10-data \
--job-dir=gs://path/model_dir/ \
--num-gpus=4 \
--train-steps=40000 \
--sync
```
*Output:*
```shell
INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600,
'_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'worker',
'_is_chief': False, '_cluster_spec':
<tensorflow.python.training.server_lib.ClusterSpec object at 0x7f6918438e10>,
'_model_dir': 'gs://<path>/model_dir/',
'_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000,
'_session_config': intra_op_parallelism_threads: 1
gpu_options {
}
allow_soft_placement: true
, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1,
'_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1.0
}
...
2017-08-01 19:59:26.496208: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 0 with properties:
name: Tesla K80
major: 3 minor: 7 memoryClockRate (GHz) 0.8235
pciBusID 0000:00:04.0
Total memory: 11.17GiB
Free memory: 11.09GiB
2017-08-01 19:59:26.775660: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 1 with properties:
name: Tesla K80
major: 3 minor: 7 memoryClockRate (GHz) 0.8235
pciBusID 0000:00:05.0
Total memory: 11.17GiB
Free memory: 11.10GiB
...
2017-08-01 19:59:29.675171: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=2; total_num_replicas=2
INFO:tensorflow:Create CheckpointSaverHook.
2017-07-31 22:38:04.629150: I
tensorflow/core/distributed_runtime/master.cc:209] CreateSession still waiting
for response from worker: /job:master/replica:0/task:0
2017-07-31 22:38:09.263492: I
tensorflow/core/distributed_runtime/master_session.cc:999] Start master
session cc58f93b1e259b0c with config:
intra_op_parallelism_threads: 1
gpu_options {
per_process_gpu_memory_fraction: 1
}
allow_soft_placement: true
INFO:tensorflow:loss = 5.82382, step = 0
INFO:tensorflow:loss = 5.82382, learning_rate = 0.8
INFO:tensorflow:Average examples/sec: 1116.92 (1116.92), step = 10
INFO:tensorflow:Average examples/sec: 1233.73 (1377.83), step = 20
INFO:tensorflow:Average examples/sec: 1485.43 (2509.3), step = 30
INFO:tensorflow:Average examples/sec: 1680.27 (2770.39), step = 40
INFO:tensorflow:Average examples/sec: 1825.38 (2788.78), step = 50
INFO:tensorflow:Average examples/sec: 1929.32 (2697.27), step = 60
INFO:tensorflow:Average examples/sec: 2015.17 (2749.05), step = 70
INFO:tensorflow:loss = 37.6272, step = 79 (19.554 sec)
INFO:tensorflow:loss = 37.6272, learning_rate = 0.8 (19.554 sec)
INFO:tensorflow:Average examples/sec: 2074.92 (2618.36), step = 80
INFO:tensorflow:Average examples/sec: 2132.71 (2744.13), step = 90
INFO:tensorflow:Average examples/sec: 2183.38 (2777.21), step = 100
INFO:tensorflow:Average examples/sec: 2224.4 (2739.03), step = 110
INFO:tensorflow:Average examples/sec: 2240.28 (2431.26), step = 120
INFO:tensorflow:Average examples/sec: 2272.12 (2739.32), step = 130
INFO:tensorflow:Average examples/sec: 2300.68 (2750.03), step = 140
INFO:tensorflow:Average examples/sec: 2325.81 (2745.63), step = 150
INFO:tensorflow:Average examples/sec: 2347.14 (2721.53), step = 160
INFO:tensorflow:Average examples/sec: 2367.74 (2754.54), step = 170
INFO:tensorflow:loss = 27.8453, step = 179 (18.893 sec)
...
```
#### PS
Run this on ps:
The ps will not do training so most of the arguments won't affect the execution
```shell
python cifar10_main.py --job-dir=gs://path/model_dir/
```
*Output:*
```shell
INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'ps', '_is_chief': False, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f48f1addf90>, '_model_dir': 'gs://path/model_dir/', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_session_config': intra_op_parallelism_threads: 1
gpu_options {
}
allow_soft_placement: true
, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1, '_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1.0
}
, '_evaluation_master': '', '_master': u'grpc://master-ip:8000'}
2017-07-31 22:54:58.928088: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job master -> {0 -> master-ip:8000}
2017-07-31 22:54:58.928153: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job ps -> {0 -> localhost:8000}
2017-07-31 22:54:58.928160: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job worker -> {0 -> worker-ip:8000}
2017-07-31 22:54:58.929873: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
```
## Visualizing results with TensorBoard
When using Estimators you can also visualize your data in TensorBoard, with no
changes in your code. You can use TensorBoard to visualize your TensorFlow
graph, plot quantitative metrics about the execution of your graph, and show
additional data like images that pass through it.
You'll see something similar to this if you "point" TensorBoard to the
`job dir` parameter you used to train or evaluate your model.
Check TensorBoard during training or after it. Just point TensorBoard to the
model_dir you chose on the previous step.
```shell
tensorboard --logdir="<job dir>"
```
## Warnings
When running `cifar10_main.py` with the `--sync` argument you may see an error
similar to:
```python
File "cifar10_main.py", line 538, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "cifar10_main.py", line 518, in main
hooks), run_config=config)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 210, in run
return _execute_schedule(experiment, schedule)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 47, in _execute_schedule
return task()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 501, in train_and_evaluate
hooks=self._eval_hooks)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 681, in _call_evaluate
hooks=hooks)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 292, in evaluate
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 638, in _evaluate_model
features, labels, model_fn_lib.ModeKeys.EVAL)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 545, in _call_model_fn
features=features, labels=labels, **kwargs)
File "cifar10_main.py", line 331, in _resnet_model_fn
gradvars, global_step=tf.train.get_global_step())
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/sync_replicas_optimizer.py", line 252, in apply_gradients
variables.global_variables())
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 170, in wrapped
return _add_should_use_warning(fn(*args, **kwargs))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 139, in _add_should_use_warning
wrapped = TFShouldUseWarningWrapper(x)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 96, in __init__
stack = [s.strip() for s in traceback.format_stack()]
```
This should not affect your training, and should be fixed on the next releases.
| {
"pile_set_name": "Github"
} |
// ©Mark Heath 2006 ([email protected])
// You are free to use this code for your own projects.
// Please consider giving credit somewhere in your app to this code if you use it
// Please do not redistribute this code without my permission
// Please get in touch and let me know of any bugs you find, enhancements you would like,
// and apps you have written
using System;
using System.IO;
using System.Text;
namespace NAudio.SoundFont
{
/// <summary>
/// Instrument Builder
/// </summary>
/// <summary>
/// Builds Instrument structures while reading or writing a SoundFont file
/// </summary>
internal class InstrumentBuilder : StructureBuilder
{
	// Previously read instrument; its end zone index is only known once the
	// next instrument's start zone index has been read.
	private Instrument lastInstrument = null;

	public override object Read(BinaryReader br)
	{
		Instrument instrument = new Instrument();
		// 20-byte fixed-width ASCII name, padded with NULs.
		string name = Encoding.ASCII.GetString(br.ReadBytes(20));
		int terminator = name.IndexOf('\0');
		if(terminator >= 0)
		{
			name = name.Substring(0, terminator);
		}
		instrument.Name = name;
		instrument.startInstrumentZoneIndex = br.ReadUInt16();
		// The previous record's zone range ends just before this one begins.
		if(lastInstrument != null)
		{
			lastInstrument.endInstrumentZoneIndex = (ushort) (instrument.startInstrumentZoneIndex - 1);
		}
		data.Add(instrument);
		lastInstrument = instrument;
		return instrument;
	}

	public override void Write(BinaryWriter bw,object o)
	{
		Instrument instrument = (Instrument) o;
		// TODO(review): writing instrument records is not implemented yet.
	}

	public override int Length
	{
		get
		{
			// 20-byte name + 2-byte start zone index
			return 22;
		}
	}

	public void LoadZones(Zone[] zones)
	{
		// Skip the final record, which is simply the terminal (EOI) marker.
		for(int index = 0; index < data.Count - 1; index++)
		{
			Instrument instrument = (Instrument) data[index];
			instrument.Zones = new Zone[instrument.endInstrumentZoneIndex - instrument.startInstrumentZoneIndex + 1];
			Array.Copy(zones,instrument.startInstrumentZoneIndex,instrument.Zones,0,instrument.Zones.Length);
		}
		// The terminal record is no longer needed once zones are attached.
		data.RemoveAt(data.Count - 1);
	}

	public Instrument[] Instruments
	{
		get
		{
			return (Instrument[]) data.ToArray(typeof(Instrument));
		}
	}
}
} | {
"pile_set_name": "Github"
} |
#! /bin/sh /usr/share/dpatch/dpatch-run
## 08_openssl-0.9.8.dpatch by <[email protected]>
##
## All lines beginning with `## DP:' are a description of the patch.
## DP: Add missing <openssl/md5.h> include so ssh.c builds against OpenSSL 0.9.8.
@DPATCH@
diff -urNad dsniff-2.4b1~/ssh.c dsniff-2.4b1/ssh.c
--- dsniff-2.4b1~/ssh.c 2006-10-12 13:21:57.000000000 -0700
+++ dsniff-2.4b1/ssh.c 2006-10-12 13:22:46.441893077 -0700
@@ -16,6 +16,7 @@
#include <openssl/ssl.h>
#include <openssl/err.h>
#include <openssl/rand.h>
+#include <openssl/md5.h>
#include <err.h>
#include <errno.h>
| {
"pile_set_name": "Github"
} |
import { combineReducers, createStore } from "redux";
import { INITIAL_STATE, reducers } from "common/Reducers.jsm";
import { mount, shallow } from "enzyme";
import {
PocketLoggedInCta,
_PocketLoggedInCta as PocketLoggedInCtaRaw,
} from "content-src/components/PocketLoggedInCta/PocketLoggedInCta";
import { Provider } from "react-redux";
import React from "react";
function mountSectionWithProps(props) {
const store = createStore(combineReducers(reducers), INITIAL_STATE);
return mount(
<Provider store={store}>
<PocketLoggedInCta {...props} />
</Provider>
);
}
describe("<PocketLoggedInCta>", () => {
it("should render a PocketLoggedInCta element", () => {
const wrapper = mountSectionWithProps({});
assert.ok(wrapper.exists());
});
it("should render Fluent spans when rendered without props", () => {
const wrapper = mountSectionWithProps({});
const message = wrapper.find("span[data-l10n-id]");
assert.lengthOf(message, 2);
});
it("should not render Fluent spans when rendered with props", () => {
const wrapper = shallow(
<PocketLoggedInCtaRaw
Pocket={{
pocketCta: {
ctaButton: "button",
ctaText: "text",
},
}}
/>
);
const message = wrapper.find("span[data-l10n-id]");
assert.lengthOf(message, 0);
});
});
| {
"pile_set_name": "Github"
} |
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* A javascript module to retrieve calendar events from the server.
*
* @module block_timeline/calendar_events_repository
* @copyright 2018 Ryan Wyllie <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
define(['jquery', 'core/ajax', 'core/notification'], function($, Ajax, Notification) {

    var DEFAULT_LIMIT = 20;

    /**
     * Map the public argument names accepted by this repository onto the
     * names expected by the core_calendar web service functions, applying
     * the given default limit when none was supplied.
     *
     * Renames (in place): limit -> limitnum, starttime -> timesortfrom,
     * endtime -> timesortto.
     *
     * @param {object} args The request arguments
     * @param {number} defaultLimit Result limit used when args.limit is absent
     * @return {object} The same args object, normalised
     */
    var normaliseArguments = function(args, defaultLimit) {
        if (!args.hasOwnProperty('limit')) {
            args.limit = defaultLimit;
        }
        args.limitnum = args.limit;
        delete args.limit;

        if (args.hasOwnProperty('starttime')) {
            args.timesortfrom = args.starttime;
            delete args.starttime;
        }

        if (args.hasOwnProperty('endtime')) {
            args.timesortto = args.endtime;
            delete args.endtime;
        }

        return args;
    };

    /**
     * Call the given calendar web service method and attach the default
     * failure handler so errors surface as notifications.
     *
     * @param {string} methodname The web service method to call
     * @param {object} args Arguments already normalised for the request
     * @return {promise} Resolved with an array of the calendar events
     */
    var sendRequest = function(methodname, args) {
        var promise = Ajax.call([{
            methodname: methodname,
            args: args
        }])[0];

        promise.fail(Notification.exception);

        return promise;
    };

    /**
     * Retrieve a list of calendar events for the logged in user for the
     * given course.
     *
     * Valid args are:
     * int courseid       Only get events for this course
     * int starttime      Only get events after this time
     * int endtime        Only get events before this time
     * int limit          Limit the number of results returned
     * int aftereventid   Offset the result set from the given id
     *
     * @method queryByCourse
     * @param {object} args The request arguments
     * @return {promise} Resolved with an array of the calendar events
     */
    var queryByCourse = function(args) {
        return sendRequest(
            'core_calendar_get_action_events_by_course',
            normaliseArguments(args, DEFAULT_LIMIT)
        );
    };

    /**
     * Retrieve a list of calendar events for the given courses for the
     * logged in user.
     *
     * Valid args are:
     * array courseids    Get events for these courses
     * int starttime      Only get events after this time
     * int endtime        Only get events before this time
     * int limit          Limit the number of results returned
     *
     * @method queryByCourses
     * @param {object} args The request arguments
     * @return {promise} Resolved with an array of the calendar events
     */
    var queryByCourses = function(args) {
        // Intentionally smaller than DEFAULT_LIMIT because this query fans
        // out over multiple courses.
        return sendRequest(
            'core_calendar_get_action_events_by_courses',
            normaliseArguments(args, 10)
        );
    };

    /**
     * Retrieve a list of calendar events for the logged in user after the
     * given time.
     *
     * Valid args are:
     * int starttime      Only get events after this time
     * int endtime        Only get events before this time
     * int limit          Limit the number of results returned
     * int aftereventid   Offset the result set from the given id
     *
     * @method queryByTime
     * @param {object} args The request arguments
     * @return {promise} Resolved with an array of the calendar events
     */
    var queryByTime = function(args) {
        args = normaliseArguments(args, DEFAULT_LIMIT);
        // Don't show events related to courses that the user is suspended in.
        args.limittononsuspendedevents = true;
        return sendRequest('core_calendar_get_action_events_by_timesort', args);
    };

    return {
        queryByTime: queryByTime,
        queryByCourse: queryByCourse,
        queryByCourses: queryByCourses,
    };
});
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<setup:Project
xmi:version="2.0"
xmlns:xmi="http://www.omg.org/XMI"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:jdt="http://www.eclipse.org/oomph/setup/jdt/1.0"
xmlns:maven="http://www.eclipse.org/oomph/setup/maven/1.0"
xmlns:predicates="http://www.eclipse.org/oomph/predicates/1.0"
xmlns:setup="http://www.eclipse.org/oomph/setup/1.0"
xmlns:setup.p2="http://www.eclipse.org/oomph/setup/p2/1.0"
xmlns:setup.workingsets="http://www.eclipse.org/oomph/setup/workingsets/1.0"
xmlns:workingsets="http://www.eclipse.org/oomph/workingsets/1.0"
xsi:schemaLocation="http://www.eclipse.org/oomph/setup/jdt/1.0 http://git.eclipse.org/c/oomph/org.eclipse.oomph.git/plain/setups/models/JDT.ecore http://www.eclipse.org/oomph/setup/maven/1.0 http://git.eclipse.org/c/oomph/org.eclipse.oomph.git/plain/setups/models/Maven.ecore http://www.eclipse.org/oomph/predicates/1.0 http://git.eclipse.org/c/oomph/org.eclipse.oomph.git/plain/setups/models/Predicates.ecore http://www.eclipse.org/oomph/setup/workingsets/1.0 http://git.eclipse.org/c/oomph/org.eclipse.oomph.git/plain/setups/models/SetupWorkingSets.ecore http://www.eclipse.org/oomph/workingsets/1.0 http://git.eclipse.org/c/oomph/org.eclipse.oomph.git/plain/setups/models/WorkingSets.ecore"
name="spring.boot"
label="Spring Boot">
<setupTask
xsi:type="setup:VariableTask"
type="FOLDER"
name="checkout.location"
defaultValue=""
storageURI="scope://Workspace"
label="Checkout Location"/>
<setupTask
xsi:type="jdt:JRETask"
version="JavaSE-1.8"
location="${jre.location-1.8}">
<description>
Define the JRE needed to compile and run the Java
projects of ${scope.project.label}
</description>
</setupTask>
<setupTask
xsi:type="setup:EclipseIniTask"
option="-Xmx"
value="2048m"
vm="true">
<description></description>
</setupTask>
<setupTask
xsi:type="setup:EclipseIniTask"
option="-Xms"
value="512m"
vm="true"/>
<setupTask
xsi:type="setup:ResourceCreationTask"
excludedTriggers="STARTUP MANUAL"
content="<?xml version="1.0" encoding="UTF-8"?>
<section name="Workbench">
	<section name="org.eclipse.jdt.internal.ui.packageview.PackageExplorerPart">
		<item value="true" key="group_libraries"/>
		<item value="false" key="linkWithEditor"/>
		<item value="2" key="layout"/>
		<item value="2" key="rootMode"/>
		<item value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot;?&gt;&#x0D;&#x0A;&lt;packageExplorer configured=&quot;true&quot; group_libraries=&quot;1&quot; layout=&quot;2&quot; linkWithEditor=&quot;0&quot; rootMode=&quot;2&quot; sortWorkingSets=&quot;false&quot; workingSetName=&quot;&quot;&gt;&#x0D;&#x0A;&lt;localWorkingSetManager&gt;&#x0D;&#x0A;&lt;workingSet editPageId=&quot;org.eclipse.jdt.internal.ui.OthersWorkingSet&quot; factoryID=&quot;org.eclipse.ui.internal.WorkingSetFactory&quot; id=&quot;1382792884467_1&quot; label=&quot;Other Projects&quot; name=&quot;Other Projects&quot;/&gt;&#x0D;&#x0A;&lt;/localWorkingSetManager&gt;&#x0D;&#x0A;&lt;activeWorkingSet workingSetName=&quot;Other Projects&quot;/&gt;&#x0D;&#x0A;&lt;allWorkingSets workingSetName=&quot;Other Projects&quot;/&gt;&#x0D;&#x0A;&lt;/packageExplorer&gt;" key="memento"/>
	</section>
</section>
"
targetURL="${workspace.location|uri}/.metadata/.plugins/org.eclipse.jdt.ui/dialog_settings.xml"
encoding="UTF-8">
<description>
Initialize JDT's package explorer to show working sets as
its root objects
</description>
</setupTask>
<setupTask
xsi:type="setup.p2:P2Task">
<requirement
name="epp.package.java"
versionRange="[4.5.0,4.6.0)"/>
<requirement
name="org.eclipse.platform.feature.group"/>
<requirement
name="org.eclipse.jdt.feature.group"/>
<requirement
name="AnyEditTools.feature.group"/>
<requirement
name="org.eclipse.m2e.maveneclipse.feature.feature.group"/>
<requirement
name="org.springframework.ide.eclipse.boot.dash.feature.feature.group"/>
<requirement
name="org.springframework.ide.eclipse.feature.feature.group"/>
<requirement
name="org.springframework.ide.eclipse.maven.feature.feature.group"/>
<requirement
name="org.eclipse.jst.server_adapters.feature.feature.group"/>
<requirement
name="org.eclipse.jst.server_adapters.ext.feature.feature.group"/>
<requirement
name="org.eclipse.jst.server_ui.feature.feature.group"/>
<requirement
name="org.eclipse.wst.server_adapters.feature.feature.group"/>
<requirement
name="org.eclipse.jst.web_ui.feature.feature.group"/>
<requirement
name="org.eclipse.wst.web_ui.feature.feature.group"/>
<requirement
name="org.sonatype.m2e.buildhelper.feature.feature.group"/>
<requirement
name="org.eclipse.mylyn.github.feature.feature.group"/>
<requirement
name="org.springframework.ide.eclipse.jdt.formatter.feature.feature.group"/>
<repository
url="http://download.eclipse.org/technology/epp/packages/mars/R"/>
<repository
url="http://download.eclipse.org/releases/mars"/>
<repository
url="http://andrei.gmxhome.de/eclipse"/>
<repository
url="https://dl.bintray.com/philwebb/m2eclipse-maveneclipse"/>
<repository
url="https://dl.bintray.com/philwebb/spring-eclipse-code-formatter"/>
<repository
url="http://dist.springsource.com/release/TOOLS/update/e4.5"/>
<repository
url="http://download.eclipse.org/egit/github/updates/"/>
<description>
Install the tools needed in the IDE to work with the
source code for ${scope.project.label}
</description>
</setupTask>
<setupTask
xsi:type="maven:MavenImportTask"
id="">
<sourceLocator
rootFolder="${checkout.location}"/>
<description></description>
</setupTask>
<setupTask
xsi:type="maven:MavenImportTask">
<sourceLocator
rootFolder="${checkout.location}/spring-boot-samples"/>
</setupTask>
<setupTask
xsi:type="maven:MavenImportTask">
<sourceLocator
rootFolder="${checkout.location}/spring-boot-deployment-tests"/>
</setupTask>
<setupTask
xsi:type="setup.workingsets:WorkingSetTask">
<workingSet
name="spring-boot">
<predicate
xsi:type="predicates:AndPredicate">
<operand
xsi:type="predicates:NamePredicate"
pattern="spring-boot.*"/>
<operand
xsi:type="workingsets:ExclusionPredicate"
excludedWorkingSet="//@setupTasks.9/@workingSets.3 //@setupTasks.9/@workingSets.2 //@setupTasks.9/@workingSets.4 //@setupTasks.9/@workingSets.1"/>
</predicate>
</workingSet>
<workingSet
name="spring-boot-tools">
<predicate
xsi:type="predicates:NamePredicate"
pattern="spring-boot-(tools|antlib|configuration-.*|loader|.*-tools|.*-plugin)"/>
</workingSet>
<workingSet
name="spring-boot-starters">
<predicate
xsi:type="predicates:NamePredicate"
pattern="spring-boot-starter(s|-.*)"/>
</workingSet>
<workingSet
name="spring-boot-samples">
<predicate
xsi:type="predicates:NamePredicate"
pattern="spring-boot-sample(s|-.*)"/>
</workingSet>
<workingSet
name="spring-boot-tests">
<predicate
xsi:type="predicates:NamePredicate"
pattern="spring-boot-.*-test.*"/>
</workingSet>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="de.loskutov.anyedit.AnyEditTools">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/de.loskutov.anyedit.AnyEditTools/org.eclipse.jdt.ui.editor.tab.width"
value="4"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/de.loskutov.anyedit.AnyEditTools/saveAndAddLine"
value="true"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.jdt.ui">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/content_assist_autoactivation_delay"
value="40"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/content_assist_disabled_computers"
value="org.eclipse.jdt.ui.javaNoTypeProposalCategory$${0x0}org.eclipse.jdt.ui.javaTypeProposalCategory$${0x0}org.eclipse.jdt.ui.textProposalCategory$${0x0}org.eclipse.recommenders.calls.rcp.proposalCategory.templates$${0x0}"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/org.eclipse.jdt.ui.typefilter.enabled"
value="java.awt.*;org.hibernate.collection.*;"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/escapeStrings"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/content_assist_favorite_static_members"
value="org.assertj.core.api.Assertions.*;org.hamcrest.Matchers.*;org.junit.Assert.*;org.mockito.BDDMockito.*;org.mockito.Matchers.*;org.mockito.Mockito.*;org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;org.springframework.test.web.servlet.result.MockMvcResultMatchers.*"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smart_backspace"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smart_opening_brace"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smart_semicolon"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smart_tab"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smartIndentAfterNewline"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/smartPaste"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.ui/org.eclipse.jdt.ui.text.custom_templates"
value="<?xml version="1.0" encoding="UTF-8" standalone="no"?><templates><template autoinsert="false" context="java" deleted="false" description="Surround code with formatter off and formatter on tags" enabled="true" name="noformat">// @formatter:off
$${cursor} $${line_selection}
// @formatter:on</template></templates>"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.jdt.core">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral"
value="ignore"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.core.resources">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.core.resources/encoding"
value="UTF-8"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.core.resources/refresh.enabled"
value="false"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.m2e.core">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.m2e.core/eclipse.m2.hideFoldersOfNestedProjects"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.m2e.core/eclipse.m2.updateIndexes"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.m2e.core/eclipse.m2.defaultPomEditorPage"
value="true"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.ui.editors">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.editors/printMarginColumn"
value="90"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.editors/lineNumberRuler"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.editors/printMargin"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.editors/showWhitespaceCharacters"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.editors/whitespaceCharacterAlphaValue"
value="20"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.springframework.ide.eclipse.beans.core">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.beans.core/org.springframework.ide.eclipse.beans.core.BeansCorePlugin.DISABLE_AUTO_DETECTION"
value="true"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.wst.xml.core">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.wst.xml.core/indicateNoGrammar"
value="-1"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.springframework.ide.eclipse.core">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.builders.enable.beanmetadatabuilder"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.useChangeDetectionForJavaFiles"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.enable.org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.enable.org.springframework.ide.eclipse.boot.bootvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanAlias-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanClass-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanConstructorArgument-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanDefinition-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanDefinitionHolder-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanFactory-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanInitDestroyMethod-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanProperty-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.beanReference-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.methodOverride-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.beans.core.parsingProblems-org.springframework.ide.eclipse.beans.core.beansvalidator"
value="false"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.springframework.ide.eclipse.core/org.springframework.ide.eclipse.core.validator.rule.enable.org.springframework.ide.eclipse.boot.missingConfigurationProcessor-org.springframework.ide.eclipse.boot.bootvalidator"
value="false"/>
</setupTask>
<setupTask
xsi:type="setup:CompoundTask"
name="org.eclipse.ui.workbench">
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.workbench/HeapStatus.showMax"
value="true"/>
<setupTask
xsi:type="setup:PreferenceTask"
key="/instance/org.eclipse.ui.workbench/RUN_IN_BACKGROUND"
value="true"/>
</setupTask>
<stream name="default"
label="Default"/>
<logicalProjectContainer
xsi:type="setup:ProjectCatalog"
href="index:/org.eclipse.setup#//@projectCatalogs[name='com.github']"/>
<description>
Eclipse project setup for people wishing to contribute to
Spring Boot.
</description>
</setup:Project>
| {
"pile_set_name": "Github"
} |
-- Remove the 'queue_payments' entry from the defaults table,
-- presumably because the setting is obsolete -- TODO confirm against
-- the accompanying application change.
DELETE FROM defaults WHERE setting_key = 'queue_payments';
| {
"pile_set_name": "Github"
} |
5700: Ideea națiunii, țara, statul național CJK : gok3 gwok3 : guó
5701:
5702: Ideograme de porc; CJK : wan6 : hun
5703: Grădină ideografică, câmp cultivat CJK : bou2 pou2 : pǔ
5704: Închisoarea ideografică, închisoarea CJK : jyu5 : yǔ
5705: Corespondență ideografică; un caz; o cutie CJK : HAN
5706: Cerc de ideograme; rotund, circular; CJK completă : jyun4 : yuan
5707: Ideografie totală, completă, CJK întreagă : leon4 : LUN
5708: Ideografia în cerc; un cerc; corral CJK : gyun6 hyun1 : quan
5709: Ideograma stabilă, corral, incintă; frontiera CJK de frontieră : jyu5 : yǔ
570A: Ideografia cameră de odihnă CJK : cing1 : qing
570B: Ideea națiunii, țara, statul național CJK : gwok3 : guó
570C: Ideograma gard CJK : cyun2 cyun4 seoi4 : chuan
570D: Ideograma înconjoară, înconjoară, corral CJK : wai4 : WEI
570E: Cerc de ideograme; rotund, circular; CJK completă : yuan
570F: Ideografia în cerc; un cerc; corral CJK : quan
5710:
5711:
5712: Grădina ideografică; parc, livadă CJK : jyun4 : yuan
5713: Cerc de ideograme; rotund, circular; CJK completă : jyun4 : yuan
5714:
5715: Biblioteca de idei CJK : syu1 : tú
5716: Schema de ideograme; diagramă, harta, imagine CJK : tou4 : tú
5717: Schema de ideograme; diagramă, harta, imagine CJK : tú
5718: Sferă ideologică, minge, cerc; masa, CJK forfetară : tyun4 : tuan
5719:
571A:
571B: Ideografia folosită în numele locurilor; stratus CJK : jik6 : yi
571C: Cercul ideog, înconjură; encirclează CJK : jyun4 waan4 : huan
571D: Ideea întreaga CJK : lyun4 : Luan
571E: Ideografie intregi, complete; rotund CJK : lyun4 : Luan
571F: Ideologia solului, a pământului; articole din pământ CJK : tou2 : tǔ
5720:
5721:
5722: Căi de ideogramă între câmpurile CJK : ding1 ting5 : tǐng
5723: Ideografie sfântă, sacră CJK : sing3 : sheng
5724:
5725: Ideograma ciupercă CJK : lu
5726: Ideog (kokuji) poarta de apă, gura de scurgere CJK : Kuai
5727: Ideografia la fel ca 壓 U + 58D3, pentru a apăsa; să asupri; a zdrobi; presiune CJK : YA
5728: Ideografia să fie la, în; constă în, restul CJK : zoi6 : zai
5729: Piatra de ideografie, cimpul CJK : heoi1 jyu4 wai4 : WEI
572A:
572B:
572C: Ideograma tencuiala cu strat de noroi CJK : wu1 : Wu
572D: Ideea jade a indicat CJK de sus : gwai1 : interfață grafică
572E: Ideografia distrusă, ruinată; să se răstoarne la rănirea CJK : pei2 : pǐ
572F: Podul Ideograph, banca CJK : ji4 : yi
5730: Pământ ideologic; sol, sol; regiunea CJK : dei6 deng6 : de dì
5731:
5732:
5733: Ideografie brazdă în câmp, mici scurgere CJK : zan3 : zhen
5734:
5735:
5736:
5737:
5738:
5739: Mormânt ideografic, mormânt : prerie; deschisă sălbăticie CJK : kwong3 : Kuang
573A: Ideografia spațiului deschis, câmpul, piața CJK : coeng4 : chǎng
573B: Limita ideografică, limita CJK : kei4 ngan4 : Qi-ul
573C:
573D:
573E: Gunoaie ideologică, gunoi; scuturare; pericol CJK : kap1 saap3 : ji
573F:
5740: Ideografie, locație, teren pentru casa CJK : zi2 : zhǐ
5741:
5742: Ideograma dealului, locuri incomode CJK : baan2 : bǎn
5743:
5744:
5745:
5746: Ideografia unui mormânt, mormânt CJK : fan4 : MEI
5747: Ideogie egală, echitabilă; toate, de asemenea, CJK : gwan1 kwan1 wan6 : iunie
5748:
5749:
574A: Cartierul ideologic, subdiviziunea urbană; (J) reședința preotului; (Budist) preot; băiețel CJK : fong1 fong4 : gheară
574B: Ideografie praf, pământ; o grămadă de pământ; să sape; pentru a reuni CJK : ban3 fan5 fan6 : BeN
574C: Ideografie praf, pământ; o grămadă de pământ; să sape; pentru a reuni CJK : ban3 ban6 : BeN
574D: Ideea de colaps; alunecarea de teren CJK : taan1 : bronza
574E: Gropă ideografică, gaură; capcana, capcana; criză CJK : ham1 ham2 : kǎn
574F: Ideografia putred, rasfatata, proasta, defalcata de CJK : pui1 pui4 : Huai
5750: Ideografia sta; scaun; plimbare, călătorie de CJK : co5 zo6 : Zuo
5751: Gropă ideografică, gaură; îngropa, capcana; harry CJK : haang1 : Keng
5752: Ideografia de a compara; a se potrivi; la egalitate CJK : bai6 bei2 : bI
5753:
5754:
5755:
5756:
5757: Piesă de ideogramă; dolar CJK : faai3 : Kuai
5758:
5759:
575A: Ideografia este tare, puternică, fermă; hotărât CJK : gin1 : jian
575B: Ideograma altar; arena, sala de examinare CJK : taan4 : bronza
575C: Ideografie o gaură, groapă CJK : lik6 : lì
575D: Tigla ideologică; barajul CJK : Baa3 : Bà
575E: Construcție ideografică, bancă, perete scăzut CJK : wu2 : Wu
575F: Mormânt de ideografie, movilă; umflatura; bombat CJK : fan4 : baltă
5760: Ideografia cade jos, picătură, chiuvetă, du-te la ruina CJK : zui6 : zhuì
5761: Pantă ideografică, bancă, deal CJK : bo1 po1 : pO
5762:
5763:
5764: Pământ ideologic; feminin, feminin CJK : kwan1 : kun
5765:
5766: Ideograma plană, netedă; auto-posedat CJK : taan2 : tǎn
5767:
5768: Ideografia unui CJK : to4 : tuo
5769: Ideea de faianță, vase din ceramică CJK : ham1 : gān
576A: Suprafață ideologică; Masura japoneza CJK : ping4 : de ping
576B: Ideografia este un stand pe care să se înlocuiască bomboanele după consumul de CJK : dim3 : dian
576C:
576D: Ideea de noroi, mori; pentru a lipi, pentru a tencui CJK : nai4 : Ní
576E: Platformă ideologică; unitate; Adresa CJK : TAI
576F: Baraj de ideog, dig; Weir CJK : pui1 : pI
5770: Ideea sălbatică CJK : gwing1 : Jiong
5771: Ideograma prafului CJK : joeng2 joeng5 : yǎng
5772:
5773: Ideografia este un gol în pământ, o cavitate, depresie; CJK deprimat : aau1 aau3 : AO
5774: Ideografia este un pământ; teren CJK : luk6 : lu
5775: Ideea de faianță, vase din ceramică CJK : jau1 : QIU
5776:
5777: Piciorul ideologic al pământului, bucată de sol CJK : ho1 ho2 : ke
5778:
5779:
577A: Clasificator de ideograme (Cant.) Pentru masele moi CJK : paat6 pet6 : Bá
577B: Ideografia este un insule, o stâncă într-un râu; un dig; pentru a opri CJK : ci4 dai2 : Chí
577C: Ideografie împărțită, lacrimă, deschis CJK : caak3 : che
577D:
577E:
577F: Mormânt ideografic; folosit în numele de loc CJK : fu6 : fU
5780:
5781:
5782: Ideografia a scăzut; suspenda, mână; în jos CJK : seoi4 : Chui
5783: Gunoaie ideologică, deșeuri, deșeuri CJK : laap6 : la
5784: Mormânt de ideografie, movilă; creasta în câmp CJK : lung5 : lǒng
5785: Mormânt ideografic, mormânt; creasta în câmp CJK : lǒng
5786: Ideograme negre de pământ; magazin, colibă CJK : lou4 : lu
5787:
5788: Ideografia folosită în numele locurilor; Japoneză-noută; Coreeană -dae CJK : doi6 : DAI
5789:
578A:
578B: Model de ideogramă, model, tip; lege; mucegai CJK : jing4 : xing
578C: Ideografia unui câmp; folosit în numele de loc CJK : dung6 tung6 : DONG
578D:
578E:
578F:
5790:
5791:
5792: Ideogramă, perete militar CJK : leoi5 : lei
5793: Limita ideologică, frontieră, frontieră CJK : goi1 : gai
5794: Ideografia de restabilire; să barbă un curent și să-și schimbe direcția; o movilă CJK : jan1 : YIN
5795: Ideografia folosită în numele locului CJK : hau5 : HOU
5796:
5797: Ideograma sacrificiu CJK : ziu6 : zhao
5798:
5799:
579A: Piatra ideografică, masa rotundă CJK : jiu1 jiu4 : yao
579B: Grămadă de ideograme; îngrămădiți-vă, ridicați CJK : DO2 : duǒ
579C: Gropul de ideograme; contrafort; CJK : DO2 : duǒ
579D: Ideografia dărăpănată, ruinată de CJK : gwai2 : guǐ
579E: Ideogramă mică movilă; numele locului; colț CJK : caa4 : Chá
579F:
57A0: Limita ideologică, malul râului sau fluviului CJK : ngan4 : YIN
57A1: Ideogul pământului; numele locului CJK : fat6 : fa
57A2: Ideografie murdărie, murdărie, pete; murdar CJK : gau3 : Gou
57A3: Ideograma de perete scăzut CJK : wun4 : yuan
57A4: Ideograma furnică, mormânt mic; deal CJK : dit6 : a muri
57A5:
57A6: Ideografia cultivă, reclamă, pe terenul agricol CJK : han2 : orizont
57A7: Ideea variabilă a unității de măsură a terenului CJK : hoeng2 : shǎng
57A8:
57A9: Ideogie sfântă, sacră, salvie; simplificarea cretei 堊; cremă cu cretă CJK : ok3 : è
57AA:
57AB: Ideografia avansează bani, plătește pentru un alt CJK : DIN6 : dian
57AC:
57AD: Ideografia caracterului folosit în loc de nume CJK : YA
57AE: Ideografia este învinsă, eșuează, se prăbușește CJK : kwaa1 : kuǎ
57AF:
57B0:
57B1:
57B2: Ideografia locului CJK înaltă și uscată : hoi2 : kǎi
57B3:
57B4: Ideografie mică, în formă de cap în formă de deal, folosit în loc de nume CJK : lou5 nou5 : nǎo
57B5: Acoperirea ideogiei cu pământul; o groapă; o gaură CJK : am1 am2 jim2 : ǎn
57B6:
57B7:
57B8: Ideografia unui dig, a CJK : jyun4 jyun6 : yuan
57B9:
57BA:
57BB: Ideografia unui dig; o pantă pe care barca trece CJK : Baa3 : Bà
57BC:
57BD: Sedimentul ideografic (Cant.), Precipitat CJK : jan6 ngan6 : YIN
57BE:
57BF:
57C0: Ideografia a scăzut; suspenda, mână; în jos CJK : Chui
57C1:
57C2: Gropi de ideog pentru irigare; gaura CJK : gang2 : geng
57C3: Ideograma praf fin, murdărie CJK : aai1 oi1 : Ai
57C4: Ideograma învârtirea prafului în vânt CJK : bung6 : Beng
57C5:
57C6: Ideea CJK piatră : gok3 kok3 : QUE
57C7: Numele de idee al podului CJK : cung1 jung2 tung2 : yǒng
57C8: Ideografia înaltă, abrupte, înalte; CJK : seon3 : iunie
57C9:
57CA:
57CB: Ideograma îngropa, secreta, ascunde CJK : maai4 : Mai
57CC: Ideea deșeurilor CJK : joeng5 long6 : lang
57CD:
57CE: Cetatea ideografică; oraș, oraș; municipiul CJK : seng4 sing4 : cheng
57CF: Ideografia o limită, o limită CJK : jin4 : shan
57D0:
57D1:
57D2: Podea ideografică, dig, dig CJK : lyut3 : minciună
57D3: Podea ideografică, dig, dig CJK : minciună
57D4: Ideografia simplă, arena; port, piața CJK : bou3 : Babcock
57D5: Ideografia unui vas mare de ceramică CJK în formă de pară : cing4 : cheng
57D6:
57D7: Ideografia (la fel ca U + 5E70 埠) un debarcader, doc, debarcader; un centru comercial, port; numele locului (de exemplu, 深水埗 în Hong Kong) CJK : bou4 bou6 : Babcock
57D8: Ideograma rosti CJK : Si4 : SHI
57D9: Instrumentul ideologic CJK : hyun1 : Xun
57DA: Creuzet de idee CJK : wo1 : guo
57DB:
57DC: Ideografie deschisă, domeniu; pustie CJK : je5 : voi
57DD: Bancă ideologică, dig; purtuberanță de la CJK : NAP1 : Nian
57DE: Ideograma (Cant.) Loc CJK : deng6 : di
57DF: Cartierul ideografic, regiunea, granița; teren CJK : wik6 : YU
57E0: Portul de ideograme CJK : bou6 fau6 : Babcock
57E1: Ideografia caracterului folosit în loc de nume CJK : aa3 ngaa3 : YA
57E2:
57E3:
57E4: Ideograma adaugă, crește, atașați; gardul inferior CJK : pei4 : Pí
57E5:
57E6: Ideogramă un castron CJK : wun2 : wǎn
57E7: Ideograma barajului CJK : geoi6 : jù
57E8:
57E9:
57EA:
57EB:
57EC:
57ED: Ideografia unui baraj, un jock; înclinat pe un canal, în cazul în care bărcile pot fi tractate în sus sau în jos CJK : dai6 doi6 : DAI
57EE:
57EF: Acoperirea ideogiei cu pământul; o groapă; o gaură CJK : am2 jim2 : ǎn
57F0: Ideograph fief CJK : coi3 : Cai
57F1:
57F2: Ideograf (Cant.) Clasificator pentru pereți CJK : bong6 bung6 pung1 : Beng
57F3: Ideografia o groapă, o gaură, o capcană, un pericol; o criză; un mortar CJK : ham2 : kǎn
57F4: Pământ ideografiu cu conținut ridicat de argilă CJK : zik6 : ZHI
57F5: Ideea de murdărie sau lut întărită; cluster CJK : DO2 : duǒ
57F6: Ideea artei CJK : ngai6 zap1 : yi
57F7: Ideografia este în mână; a pastra; efectuați CJK : ZAP1 : ZHI
57F8: Ideografia unei granițe; o limită; o dig; o frontieră; o limită CJK : jik6 : yi
57F9: Ideografia se strânge cu murdărie; cultiva CJK : pui4 : PEI
57FA: Ideografie fundație, bază CJK : gei1 : ji
57FB:
57FC: Ideogramul CJK : kei4 : Qi-ul
57FD: Mătură ideografică; să măture, să îndepărtezi CJK : sou3 : SAO
57FE:
57FF:
| {
"pile_set_name": "Github"
} |
# SPDX-License-Identifier: BSD-3-Clause
---
# Bring the test interface up with a static IPv4 address (TEST-NET-1
# range) using the initscripts provider, then tear the profile down
# again so the test leaves no state behind.
- include_role:
    name: linux-system-roles.network
  vars:
    network_connections:
      # Create and activate a persistent ethernet profile for the
      # interface under test.
      - name: "{{ interface }}"
        state: up
        persistent_state: present
        type: ethernet
        autoconnect: yes
        ip:
          address: 192.0.2.1/24
    network_provider: initscripts
- include_role:
    name: linux-system-roles.network
  vars:
    network_connections:
      # Deactivate and delete the profile created above.
      - name: "{{ interface }}"
        state: down
        persistent_state: absent
    network_provider: initscripts
...
| {
"pile_set_name": "Github"
} |
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo purego
package poly1305
// sum computes the Poly1305 authenticator of msg under key and writes
// the resulting tag into out. This is the generic fallback used on
// platforms without an assembly implementation (see the build tags).
func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
	mac := newMAC(key)
	mac.Write(msg)
	mac.Sum(out)
}
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Docker entrypoint for a Kibana container.
# Exit immediately if any command fails.
set -e

# If the first argument looks like an option flag (starts with '-'),
# assume the caller wants to pass flags to kibana itself and prepend
# the kibana command.
if [[ "$1" == -* ]]; then
	set -- kibana "$@"
fi

# When launching kibana, optionally point it at an external
# Elasticsearch and drop privileges before exec'ing.
if [ "$1" = 'kibana' ]; then
	if [ "$ELASTICSEARCH_URL" ]; then
		# Rewrite (or uncomment) the elasticsearch.url line in the
		# config. Uses '!' as the sed delimiter since URLs contain '/';
		# NOTE(review): a URL containing '!' would break this -- confirm
		# acceptable for expected inputs.
		sed -ri "s!^(\#\s*)?(elasticsearch\.url:).*!\2 '$ELASTICSEARCH_URL'!" /etc/kibana/kibana.yml
	fi
	# Run as the unprivileged "elstack" user with tini as PID 1 so
	# signals are forwarded and zombies are reaped.
	set -- su-exec elstack tini -- "$@"
fi

# Replace the shell with the final command.
exec "$@"
| {
"pile_set_name": "Github"
} |
DataverseUse TinySocial
| {
"pile_set_name": "Github"
} |
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _default = {
// Options.jsx
items_per_page: 'تال/ھەر بەت',
jump_to: 'بەتكە سەكرەش',
jump_to_confirm: 'مۇقىملاشتۇرۇش',
page: 'بەت',
// Pagination.jsx
prev_page: 'ئالدىنقى',
next_page: 'كېيىنكى',
prev_5: 'ئالدىغا 5 بەت',
next_5: 'كەينىگە 5 بەت',
prev_3: 'ئالدىغا 3 بەت',
next_3: 'كەينىگە 3 بەت'
};
exports.default = _default; | {
"pile_set_name": "Github"
} |
#ifndef LIST_H
#define LIST_H
/*
 * Copied from include/linux/...
 */

/* Byte offset of MEMBER within TYPE, computed via a cast of the null pointer. */
#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * The intermediate __mptr assignment gives a type check that @ptr really
 * points at a @member of @type before the pointer arithmetic is done.
 */
#define container_of(ptr, type, member) ({ \
    const typeof( ((type *)0)->member ) *__mptr = (ptr); \
    (type *)( (char *)__mptr - offsetof(type,member) );})

/* Node of an intrusive, circular doubly-linked list; embed it in the
 * structure being tracked and recover the container with list_entry(). */
struct list_head {
    struct list_head *next, *prev;
};

/* Static initializer: an empty list's head points back at itself. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
    struct list_head name = LIST_HEAD_INIT(name)
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
    container_of(ptr, type, member)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * The cursor must NOT be removed from the list inside the loop body;
 * use list_for_each_entry_safe for that.
 */
#define list_for_each_entry(pos, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * @n always holds the successor of @pos, so deleting @pos is safe.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member), \
         n = list_entry(pos->member.next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 *
 * The list is circular, so it is empty exactly when the head's next
 * pointer refers back to the head itself.
 */
static inline int list_empty(const struct list_head *head)
{
    return head == head->next;
}
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * Note: the parameter is spelled "_new" rather than "new" — presumably so
 * this header also compiles as C++, where "new" is a keyword (TODO confirm).
 */
static inline void __list_add(struct list_head *_new,
                              struct list_head *prev,
                              struct list_head *next)
{
    /* Link all four pointers so prev <-> _new <-> next. */
    next->prev = _new;
    _new->next = next;
    _new->prev = prev;
    prev->next = _new;
}
/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *_new, struct list_head *head)
{
    /* Splice _new between the current last element and the head. */
    __list_add(_new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * The removed entry itself is left untouched here; callers poison it.
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
    next->prev = prev;
    prev->next = next;
}
/* Non-NULL sentinel addresses stored into deleted entries so that any
 * use-after-delete faults on a recognizable pointer value instead of
 * silently traversing stale links. */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    /* Poison the dangling pointers rather than leaving them valid. */
    entry->next = (struct list_head*)LIST_POISON1;
    entry->prev = (struct list_head*)LIST_POISON2;
}
#endif
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2012 Indragie Karunaratne <[email protected]>
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of Indragie Karunaratne nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#import "SNRQueueItem.h"
#import "SNRThumbnailArtwork.h"
#import "SNRLastFMEngine-SNRAdditions.h"
#import "NSUserDefaults-SNRAdditions.h"
#import "NSManagedObjectContext-SNRAdditions.h"
// A single entry in the playback queue: wraps an SNRSong plus a "played"
// flag, and performs the Last.fm / play-count / Growl side effects that
// accompany playback state changes.
@implementation SNRQueueItem
@synthesize played = _played;
@synthesize song = _song;
#pragma mark - NSCopying
// Shallow copy sharing the same song object.
// NOTE(review): the played flag is deliberately not copied, so the copy
// starts out "unplayed" — confirm this is the intended queue semantics.
- (id)copyWithZone:(NSZone *)zone
{
    SNRQueueItem *item = [[SNRQueueItem allocWithZone:zone] init];
    item->_song = _song;
    return item;
}
#pragma mark - NSCoding
// Decodes the song from its Core Data object-ID URI. Returns nil (failing
// the whole decode) if the URI no longer resolves to an existing object,
// e.g. after the song was deleted from the library.
- (id)initWithCoder:(NSCoder *)aDecoder
{
    if ((self = [super init])) {
        NSURL *URL = [aDecoder decodeObjectForKey:@"song"];
        if (URL) {
            NSManagedObjectContext *context = SONORA_MANAGED_OBJECT_CONTEXT;
            NSManagedObjectID *objectID = [context.persistentStoreCoordinator managedObjectIDForURIRepresentation:URL];
            if (objectID) {
                _song = (SNRSong*)[context existingObjectWithID:objectID error:nil];
                if (!_song) {
                    return nil;
                }
            } else {
                return nil;
            }
        }
    }
    return self;
}
// Archives only the song's object-ID URI; "played" is transient state.
- (void)encodeWithCoder:(NSCoder *)aCoder
{
    NSURL *URI = [[self.song objectID] URIRepresentation];
    [aCoder encodeObject:URI forKey:@"song"];
}
#pragma mark - Accessors
// Setting played to YES (from NO) scrobbles the song when the user is
// authenticated and has scrobbling enabled, records a play-count entry
// timestamped now, and saves the song's managed object context.
// Setting it back to NO only flips the flag (early return).
- (void)setPlayed:(BOOL)played
{
    if (_played != played) {
        _played = played;
        if (!_played) { return; }
        SNRLastFMEngine *engine = [SNRLastFMEngine sharedInstance];
        NSUserDefaults *ud = [NSUserDefaults standardUserDefaults];
        if ([engine isAuthenticated] && ud.scrobble) {
            [engine scrobbleSong:self.song];
        }
        [self.song addPlayCountObjectWithDate:[NSDate date]];
        [self.song.managedObjectContext saveChanges];
    }
}
#pragma mark - Public API
// Sends a Last.fm "now playing" update, gated on authentication and the
// user's scrobble preference.
- (void)lastFMUpdateNowPlaying
{
    SNRLastFMEngine *engine = [SNRLastFMEngine sharedInstance];
    NSUserDefaults *ud = [NSUserDefaults standardUserDefaults];
    if ([engine isAuthenticated] && ud.scrobble) {
        [engine updateNowPlayingWithSong:self.song];
    }
}
// "Loves" the track on Last.fm; only requires authentication, not the
// scrobble preference.
- (void)lastFMLoveTrack
{
    SNRLastFMEngine *engine = [SNRLastFMEngine sharedInstance];
    if ([engine isAuthenticated]) {
        [engine loveSong:self.song];
    }
}
// Posts a Growl "now playing" notification (title = song name, description
// = artist name, icon = album thumbnail) if the user enabled it.
- (void)postGrowlNotification
{
    if ([[NSUserDefaults standardUserDefaults] growlNowPlaying]) {
        [GrowlApplicationBridge notifyWithTitle:self.song.name description:self.song.album.artist.name notificationName:kGrowlNotificationNowPlaying iconData:self.song.album.thumbnailArtwork.data priority:0 isSticky:NO clickContext:@"" identifier:kGrowlNotificationNowPlaying];
    }
}
@end | {
"pile_set_name": "Github"
} |
/*
* QEMU PCI bus manager
*
* Copyright (c) 2004 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "monitor/monitor.h"
#include "net/net.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
#include "qemu/range.h"
#include "qmp-commands.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "exec/address-spaces.h"
#include "hw/hotplug.h"
/* Compile-time switch for verbose PCI tracing; expands to a no-op when off. */
//#define DEBUG_PCI
#ifdef DEBUG_PCI
# define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
#else
# define PCI_DPRINTF(format, ...) do { } while (0)
#endif

/* Forward declarations of the BusClass hooks installed below. */
static void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent);
static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset(BusState *qbus);

/* qdev properties shared by every PCI device (slot/function address,
 * option-ROM file and BAR, multifunction and SERR capability bits). */
static Property pci_props[] = {
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present,
                    QEMU_PCI_CAP_SERR_BITNR, true),
    DEFINE_PROP_END_OF_LIST()
};

/* Migration state for a PCI bus: the per-pin IRQ assertion counters.
 * nirq is migrated as an equality check against the target's value. */
static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};

/* Register/unregister the bus's migration state when it is (un)realized. */
static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    vmstate_register(NULL, -1, &vmstate_pcibus, bus);
}
static void pci_bus_unrealize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);
    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}

static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;
    k->reset = pcibus_reset;
}

static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_init = pci_bus_class_init,
};

/* PCIe bus is a plain subtype; pci_bus_is_express() keys off this type. */
static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
};

/* Forward declarations for helpers defined later in this file. */
static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num);
static void pci_update_mappings(PCIDevice *d);
static void pci_irq_handler(void *opaque, int irq_num, int level);
static int pci_add_option_rom(PCIDevice *pdev, bool is_default_rom);
static void pci_del_option_rom(PCIDevice *pdev);

/* Subsystem IDs used when a device class does not supply its own. */
static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;

/* Global list of all registered PCI host bridges. */
static QLIST_HEAD(, PCIHostState) pci_host_bridges;
/* Return the config-space offset of BAR @reg for device @d.  The ROM BAR
 * lives at a different offset on bridges (header type 1) than on normal
 * devices (header type 0). */
static int pci_bar(PCIDevice *d, int reg)
{
    uint8_t type;
    if (reg != PCI_ROM_SLOT)
        return PCI_BASE_ADDRESS_0 + reg * 4;
    type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
}

/* Per-device INTx state is kept as one bit per pin in d->irq_state. */
static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}

/* Propagate an INTx level change up the bus hierarchy: map_irq() routes the
 * pin through each bridge until a bus with a set_irq hook (the host bridge)
 * is reached, then adjust that bus's assertion counter and (de)assert the
 * wire based on whether any device still drives it. */
static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
    PCIBus *bus;
    for (;;) {
        bus = pci_dev->bus;
        irq_num = bus->map_irq(pci_dev, irq_num);
        if (bus->set_irq)
            break;
        pci_dev = bus->parent_dev;
    }
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}

/* 1 if any device currently asserts the given IRQ line on @bus, else 0. */
int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}

/* Update interrupt status bit in config space on interrupt
 * state change. */
static void pci_update_irq_status(PCIDevice *dev)
{
    if (dev->irq_state) {
        dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
    } else {
        dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    }
}

/* Drive all four INTx pins of @dev to the deasserted level. */
void pci_device_deassert_intx(PCIDevice *dev)
{
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_irq_handler(dev, i, 0);
    }
}
/* Reset a device's PCI-visible state: deassert INTx, clear the writable
 * COMMAND/STATUS bits, reset cache-line size and interrupt line, restore
 * every implemented BAR to its type bits (i.e. unmapped), and reset
 * MSI/MSI-X state. */
static void pci_do_device_reset(PCIDevice *dev)
{
    int r;
    pci_do_device_reset_helper: ;
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);
    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    dev->config[PCI_INTERRUPT_LINE] = 0x0;
    for (r = 0; r < PCI_NUM_REGIONS; ++r) {
        PCIIORegion *region = &dev->io_regions[r];
        if (!region->size) {
            continue;
        }
        /* 64-bit memory BARs span two config dwords; write them as a quad. */
        if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(dev->config + pci_bar(dev, r), region->type);
        } else {
            pci_set_long(dev->config + pci_bar(dev, r), region->type);
        }
    }
    pci_update_mappings(dev);
    msi_reset(dev);
    msix_reset(dev);
}

/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    qdev_reset_all(&dev->qdev);
    pci_do_device_reset(dev);
}

/*
 * Trigger pci bus reset under a given bus.
 * Called via qbus_reset_all on RST# assert, after the devices
 * have been reset qdev_reset_all-ed already.
 */
static void pcibus_reset(BusState *qbus)
{
    PCIBus *bus = DO_UPCAST(PCIBus, qbus, qbus);
    int i;
    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        if (bus->devices[i]) {
            pci_do_device_reset(bus->devices[i]);
        }
    }
    /* After every device is reset no IRQ line may remain asserted. */
    for (i = 0; i < bus->nirq; i++) {
        assert(bus->irq_count[i] == 0);
    }
}
/* Record a new root bus's host bridge on the global pci_host_bridges list. */
static void pci_host_bus_register(PCIBus *bus, DeviceState *parent)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(parent);
    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

/* Return the single root bus, or NULL when there are none or several
 * (with multiple roots there is no well-defined "primary"). */
PCIBus *pci_find_primary_bus(void)
{
    PCIBus *primary_bus = NULL;
    PCIHostState *host;
    QLIST_FOREACH(host, &pci_host_bridges, next) {
        if (primary_bus) {
            /* We have multiple root buses, refuse to select a primary */
            return NULL;
        }
        primary_bus = host->bus;
    }
    return primary_bus;
}

/* Walk up through any PCI-PCI bridges to the root bus of device @d. */
PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = d->bus;
    while ((d = bus->parent_dev) != NULL) {
        bus = d->bus;
    }
    return bus;
}

/* Canonical firmware path of the root bus that @dev sits under, using the
 * host bridge's root_bus_path hook when implemented, else the qbus name. */
const char *pci_root_bus_path(PCIDevice *dev)
{
    PCIBus *rootbus = pci_device_root_bus(dev);
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
    assert(!rootbus->parent_dev);
    assert(host_bridge->bus == rootbus);
    if (hc->root_bus_path) {
        return (*hc->root_bus_path)(host_bridge, rootbus);
    }
    return rootbus->qbus.name;
}
/* Shared initialization for a freshly created root PCI bus: record the
 * memory/IO address spaces and minimum devfn, then register the bus with
 * its host bridge.  devfn_min must name function 0 of a slot. */
static void pci_bus_init(PCIBus *bus, DeviceState *parent,
                         const char *name,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->address_space_mem = address_space_mem;
    bus->address_space_io = address_space_io;
    /* host bridge */
    QLIST_INIT(&bus->child);
    pci_host_bus_register(bus, parent);
}

/* True when the bus object is (a subtype of) TYPE_PCIE_BUS. */
bool pci_bus_is_express(PCIBus *bus)
{
    return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
}

/* A root bus is one with no parent bridge device. */
bool pci_bus_is_root(PCIBus *bus)
{
    return !bus->parent_dev;
}
/* Initialize a caller-allocated PCIBus in place (for embedding inside a
 * larger device structure). */
void pci_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
                         const char *name,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min, const char *typename)
{
    qbus_create_inplace(bus, bus_size, typename, parent, name);
    pci_bus_init(bus, parent, name, address_space_mem,
                 address_space_io, devfn_min);
}

/* Allocate and initialize a new root PCI bus of the given QOM type. */
PCIBus *pci_bus_new(DeviceState *parent, const char *name,
                    MemoryRegion *address_space_mem,
                    MemoryRegion *address_space_io,
                    uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;
    bus = PCI_BUS(qbus_create(typename, parent, name));
    pci_bus_init(bus, parent, name, address_space_mem,
                 address_space_io, devfn_min);
    return bus;
}

/* Attach the interrupt routing hooks: set_irq drives the platform wires,
 * map_irq translates a device pin to a bus IRQ, irq_count tracks how many
 * devices assert each line. */
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->map_irq = map_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}

/* Convenience: pci_bus_new() followed by pci_bus_irqs(). */
PCIBus *pci_register_bus(DeviceState *parent, const char *name,
                         pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                         void *irq_opaque,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min, int nirq, const char *typename)
{
    PCIBus *bus;
    bus = pci_bus_new(parent, name, address_space_mem,
                      address_space_io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, map_irq, irq_opaque, nirq);
    return bus;
}

/* Bus number: 0 for a root bus, otherwise the secondary-bus register of
 * the parent bridge. */
int pci_bus_num(PCIBus *s)
{
    if (pci_bus_is_root(s))
        return 0; /* pci host bridge */
    return s->parent_dev->config[PCI_SECONDARY_BUS];
}
/* Migration loader for a device's raw config space.  Incoming bytes must
 * match the current device on every bit that is checked (cmask) but not
 * guest-writable (wmask/w1cmask); otherwise the devices differ and the
 * migration is rejected with -EINVAL.  On success the config is installed
 * and dependent state (BAR mappings, bridge windows, bus-master enable)
 * is recomputed. */
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s);
    uint8_t *config;
    int i;
    assert(size == pci_config_size(s));
    config = g_malloc(size);
    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);
    pci_update_mappings(s);
    if (pc->is_bridge) {
        PCIBridge *b = PCI_BRIDGE(s);
        pci_bridge_update_mappings(b);
    }
    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);
    g_free(config);
    return 0;
}

/* just put buffer */
static void put_pci_config_device(QEMUFile *f, void *pv, size_t size)
{
    const uint8_t **v = pv;
    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);
}

/* VMState adapter pairing the two functions above. */
static VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};
/* Migration loader for per-pin INTx state: each pin is sent as a 32-bit
 * value that must be exactly 0 or 1. */
static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size)
{
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    uint32_t irq_state[PCI_NUM_PINS];
    int i;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        irq_state[i] = qemu_get_be32(f);
        if (irq_state[i] != 0x1 && irq_state[i] != 0) {
            fprintf(stderr, "irq state %d: must be 0 or 1.\n",
                    irq_state[i]);
            return -EINVAL;
        }
    }
    /* Only install the bits after all of them validated. */
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        pci_set_irq_state(s, i, irq_state[i]);
    }
    return 0;
}

/* Migration saver: one big-endian 32-bit word per INTx pin. */
static void put_pci_irq_state(QEMUFile *f, void *pv, size_t size)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }
}

static VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};
/* Migration description for a conventional PCI device: negotiated stream
 * version, 256-byte config space, and the four INTx pin states. */
const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
                                   vmstate_info_pci_config,
                                   PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};

/* Same layout for PCI Express, but with the 4KiB extended config space. */
const VMStateDescription vmstate_pcie_device = {
    .name = "PCIEDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
                                   vmstate_info_pci_config,
                                   PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};

/* Pick the description matching the device's config-space size. */
static inline const VMStateDescription *pci_get_vmstate(PCIDevice *s)
{
    return pci_is_express(s) ? &vmstate_pcie_device : &vmstate_pci_device;
}

void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, pci_get_vmstate(s), s);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}

int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;
    ret = vmstate_load_state(f, pci_get_vmstate(s), s, s->version_id);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
    return ret;
}

/* Fill in the QEMU/Red Hat default subsystem IDs for devices whose class
 * does not specify its own. */
static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}
/*
 * Parse a textual PCI device address, all components hexadecimal:
 *   [[<domain>:]<bus>:]<slot>          when funcp == NULL
 *   [[<domain>:]<bus>:]<slot>.<func>   when funcp != NULL
 * On success, store the components through the out-parameters (the function
 * number defaults to 0 when funcp is NULL) and return 0; on any syntax or
 * range error return -1 with the out-parameters untouched.
 */
int pci_parse_devaddr(const char *addr, int *domp, int *busp,
                      unsigned int *slotp, unsigned int *funcp)
{
    const char *cur;
    char *end;
    unsigned long value;
    unsigned long domain = 0, busnr = 0;
    unsigned int slot = 0;
    unsigned int func = 0;

    cur = addr;
    value = strtoul(cur, &end, 16);
    if (end == cur) {
        return -1;              /* no hex digits at all */
    }
    if (*end == ':') {
        /* at least <bus>:<slot> */
        busnr = value;
        cur = end + 1;
        value = strtoul(cur, &end, 16);
        if (end == cur) {
            return -1;
        }
        if (*end == ':') {
            /* full <domain>:<bus>:<slot>: shift the earlier field up */
            domain = busnr;
            busnr = value;
            cur = end + 1;
            value = strtoul(cur, &end, 16);
            if (end == cur) {
                return -1;
            }
        }
    }
    slot = value;

    if (funcp != NULL) {
        /* in this form a ".<func>" suffix is mandatory */
        if (*end != '.') {
            return -1;
        }
        cur = end + 1;
        value = strtoul(cur, &end, 16);
        if (end == cur) {
            return -1;
        }
        func = value;
    }

    /* Range-check each component, then require end of string. */
    if (domain > 0xffff || busnr > 0xff || slot > 0x1f || func > 7) {
        return -1;
    }
    if (*end) {
        return -1;
    }

    *domp = domain;
    *busp = busnr;
    *slotp = slot;
    if (funcp != NULL) {
        *funcp = func;
    }
    return 0;
}
/* Resolve a textual device address (as accepted by pci_parse_devaddr, slot
 * form) against root bus @root: returns the target bus and stores the
 * devfn (function 0 of the parsed slot) in *devfnp.  A NULL devaddr means
 * "auto-assign on bus 0" (*devfnp = -1).  Returns NULL on any error;
 * non-zero domains are not supported. */
PCIBus *pci_get_bus_devfn(int *devfnp, PCIBus *root, const char *devaddr)
{
    int dom, bus;
    unsigned slot;
    if (!root) {
        fprintf(stderr, "No primary PCI bus\n");
        return NULL;
    }
    assert(!root->parent_dev);
    if (!devaddr) {
        *devfnp = -1;
        return pci_find_bus_nr(root, 0);
    }
    if (pci_parse_devaddr(devaddr, &dom, &bus, &slot, NULL) < 0) {
        return NULL;
    }
    if (dom != 0) {
        fprintf(stderr, "No support for non-zero PCI domains\n");
        return NULL;
    }
    *devfnp = PCI_DEVFN(slot, 0);
    return pci_find_bus_nr(root, bus);
}
/* cmask: bits that must match between source and target on migration —
 * the identifying registers (IDs, class, header type, capability list). */
static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}

/* wmask: bits the guest may write.  The standard header exposes a few
 * registers; everything beyond the header defaults to fully writable
 * until capabilities carve out their own masks. */
static void pci_init_wmask(PCIDevice *dev)
{
    int config_size = pci_config_size(dev);
    dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
    dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
    pci_set_word(dev->wmask + PCI_COMMAND,
                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                 PCI_COMMAND_INTX_DISABLE);
    if (dev->cap_present & QEMU_PCI_CAP_SERR) {
        pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
    }
    memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
           config_size - PCI_CONFIG_HEADER_SIZE);
}

/* w1cmask: write-1-to-clear bits (the sticky error bits in STATUS). */
static void pci_init_w1cmask(PCIDevice *dev)
{
    /*
     * Note: It's okay to set w1cmask even for readonly bits as
     * long as their value is hardwired to 0.
     */
    pci_set_word(dev->w1cmask + PCI_STATUS,
                 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
}
/* Additional masks for type-1 (bridge) headers: bus numbers, forwarding
 * windows (I/O, memory, prefetchable memory), and bridge control bits. */
static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LETENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);
    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);
    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);
    /* Supported memory and i/o types */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);
    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);
    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);
    /* The window type bits are read-only but must match on migration. */
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}
/* Validate multifunction constraints for @dev in its slot and set the
 * multifunction bit in the header when requested.  Returns 0 on success,
 * -1 (with an error report) when the slot population is inconsistent. */
static int pci_init_multifunction(PCIBus *bus, PCIDevice *dev)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;
    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }
    /*
     * multifunction bit is interpreted in two ways as follows.
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the rest function (> 0)
     *     is allowed to leave the bit to 0.
     *     Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
     *
     * So OS (at least Linux) checks the bit of only function 0,
     * and doesn't see the bit of function > 0.
     *
     * The below check allows both interpretation.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_report("PCI: single function device can't be populated "
                         "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return -1;
        }
        return 0;
    }
    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return 0;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_report("PCI: %x.0 indicates single function, "
                         "but %x.%x is already populated.",
                         slot, slot, func);
            return -1;
        }
    }
    return 0;
}
/* Allocate the five zero-initialized per-device config-space arrays:
 * config (live values), cmask (migration-checked bits), wmask (guest-
 * writable bits), w1cmask (write-1-to-clear bits), used (capability
 * allocation map). */
static void pci_config_alloc(PCIDevice *pci_dev)
{
    int config_size = pci_config_size(pci_dev);
    pci_dev->config = g_malloc0(config_size);
    pci_dev->cmask = g_malloc0(config_size);
    pci_dev->wmask = g_malloc0(config_size);
    pci_dev->w1cmask = g_malloc0(config_size);
    pci_dev->used = g_malloc0(config_size);
}

/* Release everything pci_config_alloc() allocated. */
static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}

/* Detach the device from its bus slot and tear down its config arrays and
 * bus-master address space (reverses do_pci_register_device). */
static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_dev->bus->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);
    address_space_destroy(&pci_dev->bus_master_as);
    memory_region_destroy(&pci_dev->bus_master_enable_region);
}
/* -1 for devfn means auto assign */
/* Register @pci_dev on @bus at @devfn: pick a free slot when devfn < 0,
 * set up the bus-master DMA address space, allocate and populate config
 * space from the device class, install the mask arrays, and hook up the
 * config read/write callbacks.  Returns the device, or NULL (with an
 * error report) when the slot is taken or multifunction rules fail. */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
                                         const char *name, int devfn)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    AddressSpace *dma_as;
    if (devfn < 0) {
        /* Scan function 0 of each slot from devfn_min upward. */
        for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
            devfn += PCI_FUNC_MAX) {
            if (!bus->devices[devfn])
                goto found;
        }
        error_report("PCI: no slot/function available for %s, all in use", name);
        return NULL;
    found: ;
    } else if (bus->devices[devfn]) {
        error_report("PCI: slot %d function %d not available for %s, in use by %s",
                     PCI_SLOT(devfn), PCI_FUNC(devfn), name, bus->devices[devfn]->name);
        return NULL;
    }
    pci_dev->bus = bus;
    pci_dev->devfn = devfn;
    /* DMA goes through an alias region that is toggled by the bus-master
     * enable bit in the COMMAND register (starts disabled). */
    dma_as = pci_device_iommu_address_space(pci_dev);
    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region,
                       name);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);
    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);
    if (!pc->is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (pc->is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    if (pci_init_multifunction(bus, pci_dev)) {
        do_pci_unregister_device(pci_dev);
        return NULL;
    }
    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}
/* Remove every currently-mapped BAR region (and any VGA regions) from
 * their address spaces. */
static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;
    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }
    pci_unregister_vga(pci_dev);
}

/* qdev exit hook: unmap BARs, drop the option ROM, run the class exit
 * callback, then detach from the bus. */
static int pci_unregister_device(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);
    if (pc->exit) {
        pc->exit(pci_dev);
    }
    do_pci_unregister_device(pci_dev);
    return 0;
}
/* Register @memory as BAR @region_num of @pci_dev with the given type
 * flags.  The region size must be a power of two.  The BAR starts
 * unmapped; the write mask is derived from the size so the guest can only
 * program naturally-aligned addresses, and for 64-bit memory BARs both
 * config dwords are claimed. */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr;
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    if (size & (size-1)) {
        fprintf(stderr, "ERROR: PCI region size must be pow2 "
                    "type=0x%x, size=0x%"FMT_PCIBUS"\n", type, size);
        exit(1);
    }
    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = NULL;
    wmask = ~(size - 1);
    addr = pci_bar(pci_dev, region_num);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }
    pci_set_long(pci_dev->config + addr, type);
    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
    /* I/O BARs map into the bus's I/O space, everything else into memory. */
    pci_dev->io_regions[region_num].memory = memory;
    pci_dev->io_regions[region_num].address_space
        = type & PCI_BASE_ADDRESS_SPACE_IO
        ? pci_dev->bus->address_space_io
        : pci_dev->bus->address_space_mem;
}
static void pci_update_vga(PCIDevice *pci_dev)
{
uint16_t cmd;
if (!pci_dev->has_vga) {
return;
}
cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
cmd & PCI_COMMAND_MEMORY);
memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
cmd & PCI_COMMAND_IO);
memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
cmd & PCI_COMMAND_IO);
}
/* Map the three legacy VGA windows (one memory, two I/O ranges) onto the
 * given regions.  They are added with priority 1 so they overlap whatever
 * is already mapped at those addresses. */
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(pci_dev->bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(pci_dev->bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(pci_dev->bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);

    pci_dev->has_vga = true;
    /* Sync the window enables with the current PCI_COMMAND decode bits. */
    pci_update_vga(pci_dev);
}

/* Remove the legacy VGA windows registered by pci_register_vga(). */
void pci_unregister_vga(PCIDevice *pci_dev)
{
    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(pci_dev->bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(pci_dev->bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(pci_dev->bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}
/* Return the currently-mapped guest address of a BAR
 * (PCI_BAR_UNMAPPED if the BAR is not mapped). */
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
{
    return pci_dev->io_regions[region_num].addr;
}
/* Decode where a BAR is currently mapped according to the guest-written
 * config space.  Returns PCI_BAR_UNMAPPED when decoding is disabled in
 * PCI_COMMAND or the programmed value is one we cannot sensibly map. */
static pcibus_t pci_bar_address(PCIDevice *d,
                                int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    int bar = pci_bar(d, reg);
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        /* I/O BAR: only decoded while PCI_COMMAND_IO is set. */
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_get_long(d->config + bar) & ~(size - 1);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || new_addr == 0 || last_addr >= UINT32_MAX) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    /* Memory BAR: only decoded while PCI_COMMAND_MEMORY is set. */
    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        new_addr = pci_get_quad(d->config + bar);
    } else {
        new_addr = pci_get_long(d->config + bar);
    }
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || new_addr == 0 ||
        last_addr == PCI_BAR_UNMAPPED) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
/* Re-decode every BAR and remap the regions whose address changed.
 * Called after config-space writes that can affect BAR decoding. */
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            /* Unmap from the old address before mapping at the new one. */
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}
/* Non-zero iff INTx delivery is disabled via PCI_COMMAND. */
static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}

/* Called after interrupt disabled field update in config space,
 * assert/deassert interrupts if necessary.
 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);
    if (disabled == was_irq_disabled)
        return;
    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        /* Disabling forces pending pins low; re-enabling restores them. */
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}

/* Default config-space read: little-endian load of up to 4 bytes.
 * NOTE(review): no bounds check on address/len here — callers appear to be
 * trusted to stay inside config space; confirm at the call sites. */
uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}
/* Default config-space write: apply wmask (plainly writable bits) and
 * w1cmask (write-1-to-clear bits) byte by byte, then refresh any state
 * that depends on the written registers (BAR mappings, INTx disable,
 * bus-master enable, MSI/MSI-X). */
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        /* A bit must not be both plainly writable and write-1-to-clear. */
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }
    /* 24 == 6 BARs * 4 bytes.  Remap if a BAR, the ROM BAR, or the
     * command register (decode enables) was touched. */
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (range_covers_byte(addr, l, PCI_COMMAND)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  pci_get_word(d->config + PCI_COMMAND)
                                  & PCI_COMMAND_MASTER);
    }

    /* Let MSI/MSI-X see the original written value, not the masked one. */
    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
}
/***********************************************************/
/* generic PCI irq support */
/* 0 <= irq_num <= 3. level must be 0 or 1 */
/* qemu_irq handler: drive one INTx pin of the device to 'level'. */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int delta = level - pci_irq_state(pci_dev, irq_num);

    /* Nothing to do when the pin already sits at the requested level. */
    if (delta == 0) {
        return;
    }

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    /* With INTx disabled in PCI_COMMAND we record the state but do not
     * forward the level change to the bus. */
    if (pci_irq_disabled(pci_dev)) {
        return;
    }
    pci_change_irq_level(pci_dev, irq_num, delta);
}
/* Map the 1-based INTx pin register (1 = INTA) to a 0-based pin number. */
static inline int pci_intx(PCIDevice *pci_dev)
{
    return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
}

/* Allocate a qemu_irq that raises/lowers this device's own INTx pin. */
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
    int intx = pci_intx(pci_dev);

    return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
}

/* Drive this device's INTx pin to the given level (0 or 1). */
void pci_set_irq(PCIDevice *pci_dev, int level)
{
    int intx = pci_intx(pci_dev);
    pci_irq_handler(pci_dev, intx, level);
}
/* Special hooks used by device assignment */
/* Install the root bus's INTx-pin -> system-IRQ routing hook. */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}
/* Resolve the system IRQ that a device's INTx pin routes to: swizzle the
 * pin through every bridge up to the root bus, then ask the root's
 * routing hook. */
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    /* Walk upwards; each bus remaps the pin.  The loop ends on the root
     * bus, which has no parent device. */
    do {
        bus = dev->bus;
        pin = bus->map_irq(dev, pin);
        dev = bus->parent_dev;
    } while (dev);

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}
/* True iff two INTx routes differ in mode or IRQ number. */
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
{
    return old->mode != new->mode || old->irq != new->irq;
}

/* Recursively notify every device on this bus and all child buses that
 * INTx routing may have changed. */
void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
{
    PCIDevice *dev;
    PCIBus *sec;
    int i;

    for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
        dev = bus->devices[i];
        if (dev && dev->intx_routing_notifier) {
            dev->intx_routing_notifier(dev);
        }
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_bus_fire_intx_routing_notifier(sec);
    }
}

/* Register a per-device callback invoked when INTx routing changes. */
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}
/*
* PCI-to-PCI bridge specification
* 9.1: Interrupt routing. Table 9-1
*
* the PCI Express Base Specification, Revision 2.1
 * 2.2.8.1: INTx interrupt signaling - Rules
* the Implementation Note
* Table 2-20
*/
/*
* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
* 0-origin unlike PCI interrupt pin register.
*/
/* Standard bridge swizzle: rotate the pin by the device's slot number so
 * INTx lines are spread evenly across the four pins (see tables above). */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS;
}
/***********************************************************/
/* monitor info on PCI */
/* Entry in the class-code description table used by the monitor and by
 * firmware device-path naming. */
typedef struct {
    uint16_t class;       /* PCI class code to match */
    const char *desc;     /* human-readable description for the monitor */
    const char *fw_name;  /* firmware device-path node name, if any */
    uint16_t fw_ign_bits; /* class bits ignored when matching for fw_name */
} pci_class_desc;
/* Class-code description table, terminated by a NULL-desc sentinel.
 * Entries without fw_name fall back to a "pciVVVV,DDDD" firmware name. */
static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    /* Typo fix: was "Fireware contorller".  The misspelled fw_name
     * "fireware" is kept unchanged because it is guest/firmware-visible
     * in device paths. */
    { 0x0c00, "FireWire controller", "fireware"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};
/* Invoke fn(bus, dev, opaque) for every populated slot/function on bus. */
static void pci_for_each_device_under_bus(PCIBus *bus,
                                          void (*fn)(PCIBus *b, PCIDevice *d,
                                                     void *opaque),
                                          void *opaque)
{
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        PCIDevice *d = bus->devices[devfn];

        if (d != NULL) {
            fn(bus, d, opaque);
        }
    }
}

/* Resolve bus_num below 'bus' and iterate its devices; silently does
 * nothing when the bus number cannot be found. */
void pci_for_each_device(PCIBus *bus, int bus_num,
                         void (*fn)(PCIBus *b, PCIDevice *d, void *opaque),
                         void *opaque)
{
    PCIBus *target = pci_find_bus_nr(bus, bus_num);

    if (target != NULL) {
        pci_for_each_device_under_bus(target, fn, opaque);
    }
}
/* Look up the description-table entry for a class code; when nothing
 * matches, the terminating NULL-desc sentinel entry is returned. */
static const pci_class_desc *get_class_desc(int class)
{
    const pci_class_desc *p;

    for (p = pci_class_descriptions; p->desc; p++) {
        if (p->class == class) {
            return p;
        }
    }
    return p; /* sentinel: p->desc == NULL */
}
static PciDeviceInfoList *qmp_query_pci_devices(PCIBus *bus, int bus_num);
static PciDeviceInfoList *qmp_query_pci_devices(PCIBus *bus, int bus_num);

/* Build the QMP region list for a device: one entry per registered BAR,
 * in BAR-index order. */
static PciMemoryRegionList *qmp_query_pci_regions(const PCIDevice *dev)
{
    PciMemoryRegionList *head = NULL, *cur_item = NULL;
    int i;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        const PCIIORegion *r = &dev->io_regions[i];
        PciMemoryRegionList *region;

        /* Skip BARs that were never registered. */
        if (!r->size) {
            continue;
        }

        region = g_malloc0(sizeof(*region));
        region->value = g_malloc0(sizeof(*region->value));

        if (r->type & PCI_BASE_ADDRESS_SPACE_IO) {
            region->value->type = g_strdup("io");
        } else {
            region->value->type = g_strdup("memory");
            region->value->has_prefetch = true;
            region->value->prefetch = !!(r->type & PCI_BASE_ADDRESS_MEM_PREFETCH);
            region->value->has_mem_type_64 = true;
            region->value->mem_type_64 = !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
        }

        region->value->bar = i;
        region->value->address = r->addr;
        region->value->size = r->size;

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = region;
        } else {
            cur_item->next = region;
            cur_item = region;
        }
    }

    return head;
}
/* Build the QMP PciBridgeInfo for a bridge device: bus numbers, the three
 * forwarding windows, and (recursively) the devices behind the bridge. */
static PciBridgeInfo *qmp_query_pci_bridge(PCIDevice *dev, PCIBus *bus,
                                           int bus_num)
{
    PciBridgeInfo *info;

    info = g_malloc0(sizeof(*info));
    info->bus.number = dev->config[PCI_PRIMARY_BUS];
    info->bus.secondary = dev->config[PCI_SECONDARY_BUS];
    info->bus.subordinate = dev->config[PCI_SUBORDINATE_BUS];

    /* I/O, memory and prefetchable-memory forwarding windows. */
    info->bus.io_range = g_malloc0(sizeof(*info->bus.io_range));
    info->bus.io_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO);
    info->bus.io_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO);

    info->bus.memory_range = g_malloc0(sizeof(*info->bus.memory_range));
    info->bus.memory_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY);
    info->bus.memory_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY);

    info->bus.prefetchable_range = g_malloc0(sizeof(*info->bus.prefetchable_range));
    info->bus.prefetchable_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
    info->bus.prefetchable_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

    /* Recurse into the secondary bus if it has been programmed. */
    if (dev->config[PCI_SECONDARY_BUS] != 0) {
        PCIBus *child_bus = pci_find_bus_nr(bus, dev->config[PCI_SECONDARY_BUS]);
        if (child_bus) {
            info->has_devices = true;
            info->devices = qmp_query_pci_devices(child_bus, dev->config[PCI_SECONDARY_BUS]);
        }
    }

    return info;
}
/* Build the QMP PciDeviceInfo for one device: address, class, ids,
 * regions, IRQ and (for bridges) the sub-hierarchy. */
static PciDeviceInfo *qmp_query_pci_device(PCIDevice *dev, PCIBus *bus,
                                           int bus_num)
{
    const pci_class_desc *desc;
    PciDeviceInfo *info;
    uint8_t type;
    int class;

    info = g_malloc0(sizeof(*info));
    info->bus = bus_num;
    info->slot = PCI_SLOT(dev->devfn);
    info->function = PCI_FUNC(dev->devfn);

    class = pci_get_word(dev->config + PCI_CLASS_DEVICE);
    info->class_info.q_class = class;
    desc = get_class_desc(class);
    if (desc->desc) {
        /* Human-readable class name when we know it. */
        info->class_info.has_desc = true;
        info->class_info.desc = g_strdup(desc->desc);
    }

    info->id.vendor = pci_get_word(dev->config + PCI_VENDOR_ID);
    info->id.device = pci_get_word(dev->config + PCI_DEVICE_ID);
    info->regions = qmp_query_pci_regions(dev);
    info->qdev_id = g_strdup(dev->qdev.id ? dev->qdev.id : "");

    if (dev->config[PCI_INTERRUPT_PIN] != 0) {
        info->has_irq = true;
        info->irq = dev->config[PCI_INTERRUPT_LINE];
    }

    /* Bridges additionally report their secondary-bus hierarchy. */
    type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    if (type == PCI_HEADER_TYPE_BRIDGE) {
        info->has_pci_bridge = true;
        info->pci_bridge = qmp_query_pci_bridge(dev, bus, bus_num);
    }

    return info;
}
/* Build the QMP device list for one bus, in devfn order. */
static PciDeviceInfoList *qmp_query_pci_devices(PCIBus *bus, int bus_num)
{
    PciDeviceInfoList *head = NULL;
    PciDeviceInfoList **tail = &head;
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        PCIDevice *dev = bus->devices[devfn];
        PciDeviceInfoList *entry;

        if (!dev) {
            continue;
        }
        entry = g_malloc0(sizeof(*entry));
        entry->value = qmp_query_pci_device(dev, bus, bus_num);
        /* Append via tail pointer (qapi lacks GSList support). */
        *tail = entry;
        tail = &entry->next;
    }

    return head;
}
/* Build the QMP PciInfo for bus number bus_num under 'bus', or NULL when
 * that bus number cannot be resolved. */
static PciInfo *qmp_query_pci_bus(PCIBus *bus, int bus_num)
{
    PciInfo *info = NULL;

    bus = pci_find_bus_nr(bus, bus_num);
    if (bus) {
        info = g_malloc0(sizeof(*info));
        info->bus = bus_num;
        info->devices = qmp_query_pci_devices(bus, bus_num);
    }

    return info;
}

/* QMP query-pci handler: one PciInfo per host bridge, rooted at bus 0. */
PciInfoList *qmp_query_pci(Error **errp)
{
    PciInfoList *info, *head = NULL, *cur_item = NULL;
    PCIHostState *host_bridge;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        info = g_malloc0(sizeof(*info));
        info->value = qmp_query_pci_bus(host_bridge->bus, 0);

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
/* NIC model names accepted on the command line; must stay index-aligned
 * with pci_nic_names below. */
static const char * const pci_nic_models[] = {
    "ne2k_pci",
    "i82551",
    "i82557b",
    "i82559er",
    "rtl8139",
    "e1000",
    "pcnet",
    "virtio",
    NULL
};

/* Corresponding qdev driver names, same order as pci_nic_models
 * (note "virtio" maps to the "virtio-net-pci" driver). */
static const char * const pci_nic_names[] = {
    "ne2k_pci",
    "i82551",
    "i82557b",
    "i82559er",
    "rtl8139",
    "e1000",
    "pcnet",
    "virtio-net-pci",
    NULL
};
/* Initialize a PCI NIC. */
/* FIXME callers should check for failure, but don't */
/* Create and initialize a PCI NIC for 'nd'.  Returns NULL when the model
 * is unknown, the device address is invalid, or qdev init fails. */
PCIDevice *pci_nic_init(NICInfo *nd, PCIBus *rootbus,
                        const char *default_model,
                        const char *default_devaddr)
{
    const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr;
    PCIBus *bus;
    int devfn;
    PCIDevice *pci_dev;
    DeviceState *dev;
    int i;

    /* Translate the user-supplied model name into a table index. */
    i = qemu_find_nic_model(nd, pci_nic_models, default_model);
    if (i < 0)
        return NULL;

    bus = pci_get_bus_devfn(&devfn, rootbus, devaddr);
    if (!bus) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, pci_nic_names[i]);
        return NULL;
    }

    pci_dev = pci_create(bus, devfn, pci_nic_names[i]);
    dev = &pci_dev->qdev;
    qdev_set_nic_properties(dev, nd);
    if (qdev_init(dev) < 0)
        return NULL;
    return pci_dev;
}
/* As pci_nic_init(), but exits the process instead of returning NULL.
 * Also honours "model=?" which lists the supported models and exits. */
PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
                               const char *default_model,
                               const char *default_devaddr)
{
    PCIDevice *nic;

    if (qemu_show_nic_models(nd->model, pci_nic_models)) {
        exit(0);
    }

    nic = pci_nic_init(nd, rootbus, default_model, default_devaddr);
    if (nic == NULL) {
        exit(1);
    }
    return nic;
}
/* Instantiate the PCI VGA device selected on the command line, or return
 * NULL for non-PCI (or no) display types. */
PCIDevice *pci_vga_init(PCIBus *bus)
{
    const char *name;

    switch (vga_interface_type) {
    case VGA_CIRRUS:
        name = "cirrus-vga";
        break;
    case VGA_QXL:
        name = "qxl-vga";
        break;
    case VGA_STD:
        name = "VGA";
        break;
    case VGA_VMWARE:
        name = "vmware-svga";
        break;
    case VGA_NONE:
    default:
        /* Other non-PCI types.  Checking for unsupported types is already
         * done in vl.c. */
        return NULL;
    }
    return pci_create_simple(bus, -1, name);
}
/* Whether a given bus number is in range of the secondary
* bus of the given bridge device. */
static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
{
    /* Strict '<' on the secondary side: the secondary bus number itself
     * is matched directly by the caller, not through this range check. */
    return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
             PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
           dev->config[PCI_SECONDARY_BUS] < bus_num &&
           bus_num <= dev->config[PCI_SUBORDINATE_BUS];
}
/* Find the bus with number bus_num at or below 'bus', pruning the search
 * with each bridge's secondary/subordinate bus-number registers. */
static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        /* Descend into the one child whose range covers bus_num; when no
         * child matches, 'sec' ends up NULL and the outer loop exits. */
        QLIST_FOREACH(sec, &bus->child, sibling) {
            assert(!pci_bus_is_root(sec));
            if (sec->parent_dev->config[PCI_SECONDARY_BUS] == bus_num) {
                return sec;
            }
            if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                break;
            }
        }
    }

    return NULL;
}
/* Depth-first walk over the bus hierarchy.  begin() runs before the
 * children and its return value becomes their parent_state; end() runs
 * after all children have been visited. */
void pci_for_each_bus_depth_first(PCIBus *bus,
                                  void *(*begin)(PCIBus *bus, void *parent_state),
                                  void (*end)(PCIBus *bus, void *state),
                                  void *parent_state)
{
    PCIBus *child;
    void *state = parent_state;

    if (!bus) {
        return;
    }

    if (begin) {
        state = begin(bus, parent_state);
    }
    QLIST_FOREACH(child, &bus->child, sibling) {
        pci_for_each_bus_depth_first(child, begin, end, state);
    }
    if (end) {
        end(bus, state);
    }
}
/* Look up the device at (bus_num, devfn) below 'bus'; NULL when either
 * the bus number or the slot is not populated. */
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
{
    PCIBus *b = pci_find_bus_nr(bus, bus_num);

    return b ? b->devices[devfn] : NULL;
}
/* qdev init hook for PCI devices: register on the bus, run the
 * device-model init callback, then attach any option ROM. */
static int pci_qdev_init(DeviceState *qdev)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIBus *bus;
    int rc;
    bool is_default_rom;

    /* initialize cap_present for pci_is_express() and pci_config_size() */
    if (pc->is_express) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    bus = PCI_BUS(qdev_get_parent_bus(qdev));
    pci_dev = do_pci_register_device(pci_dev, bus,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn);
    if (pci_dev == NULL)
        return -1;
    if (pc->init) {
        rc = pc->init(pci_dev);
        if (rc != 0) {
            /* Device-model init failed: undo the bus registration. */
            do_pci_unregister_device(pci_dev);
            return rc;
        }
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        /* Fall back to the class-provided default ROM image. */
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }
    pci_add_option_rom(pci_dev, is_default_rom);

    return 0;
}
/* Create (but do not initialize) a PCI device on 'bus' at 'devfn'. */
PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction,
                                    const char *name)
{
    DeviceState *dev;

    dev = qdev_create(&bus->qbus, name);
    qdev_prop_set_int32(dev, "addr", devfn);
    qdev_prop_set_bit(dev, "multifunction", multifunction);
    return PCI_DEVICE(dev);
}

/* As pci_create_multifunction(), but also run qdev init (no-fail). */
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
                                           bool multifunction,
                                           const char *name)
{
    PCIDevice *dev = pci_create_multifunction(bus, devfn, multifunction, name);
    qdev_init_nofail(&dev->qdev);
    return dev;
}

/* Single-function convenience wrapper around pci_create_multifunction(). */
PCIDevice *pci_create(PCIBus *bus, int devfn, const char *name)
{
    return pci_create_multifunction(bus, devfn, false, name);
}

/* Single-function convenience wrapper that also initializes the device. */
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
    return pci_create_simple_multifunction(bus, devfn, false, name);
}
/* Find a contiguous run of 'size' unused bytes in config space after the
 * standard header; returns its offset, or 0 when no such run exists. */
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
{
    int offset = PCI_CONFIG_HEADER_SIZE;
    int i;
    for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
        if (pdev->used[i])
            /* Byte in use: the candidate run restarts after it. */
            offset = i + 1;
        else if (i - offset + 1 == size)
            /* Free run [offset, i] is exactly 'size' bytes long. */
            return offset;
    }
    return 0;
}
/* Walk the capability linked list looking for cap_id.  Returns its
 * config-space offset (0 if absent).  When prev_p is non-NULL it receives
 * the offset of the 'next' pointer linking to the found capability, which
 * pci_del_capability() uses for unlinking. */
static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;
}

/* Return the start offset of the capability covering config-space byte
 * 'offset', or 0 if that byte is unused.  Implemented by picking the
 * highest capability start that does not exceed 'offset'. */
static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }
    return found;
}
/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, int size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
        return;
    }
    /* Offset 0x18 holds the pointer to the PCI Data Structure ("PCIR"). */
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
                vendor_id, device_id, rom_vendor_id, rom_device_id);

    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        /* Adjust the checksum byte so the whole image still sums to zero:
         * add back the old id bytes, subtract the new ones. */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}
/* Add an option rom for the device */
/* Add an option rom for the device: load the image into a RAM region and
 * expose it through the ROM BAR (or, for rom_bar=0 compatibility, hand it
 * to fw_cfg instead). */
static int pci_add_option_rom(PCIDevice *pdev, bool is_default_rom)
{
    int size;
    char *path;
    void *ptr;
    char name[32];
    const VMStateDescription *vmsd;

    if (!pdev->romfile)
        return 0;
    if (strlen(pdev->romfile) == 0)
        return 0;

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return 0;
    }

    path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
    if (path == NULL) {
        /* Not in the search path: try the name as given. */
        path = g_strdup(pdev->romfile);
    }

    size = get_image_size(path);
    if (size < 0) {
        error_report("%s: failed to find romfile \"%s\"",
                     __func__, pdev->romfile);
        g_free(path);
        return -1;
    } else if (size == 0) {
        error_report("%s: ignoring empty romfile \"%s\"",
                     __func__, pdev->romfile);
        g_free(path);
        return -1;
    }
    /* Round the BAR size up to the next power of two. */
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    vmsd = qdev_get_vmsd(DEVICE(pdev));

    if (vmsd) {
        snprintf(name, sizeof(name), "%s.rom", vmsd->name);
    } else {
        snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev)));
    }
    pdev->has_rom = true;
    memory_region_init_ram(&pdev->rom, OBJECT(pdev), name, size);
    vmstate_register_ram(&pdev->rom, &pdev->qdev);
    ptr = memory_region_get_ram_ptr(&pdev->rom);
    /* NOTE(review): the load_image() return value is not checked; a read
     * failure here would leave the ROM region zero-filled — confirm
     * whether that is acceptable or should be reported. */
    load_image(path, ptr);
    g_free(path);

    if (is_default_rom) {
        /* Only the default rom images will be patched (if needed). */
        pci_patch_ids(pdev, ptr, size);
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);

    return 0;
}
/* Drop the option-ROM RAM region added by pci_add_option_rom(). */
static void pci_del_option_rom(PCIDevice *pdev)
{
    if (!pdev->has_rom) {
        return;
    }

    vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
    memory_region_destroy(&pdev->rom);
    pdev->has_rom = false;
}
/*
 * if offset != 0,
 * Reserve space at that fixed offset and add the capability to the
 * linked list in pci config space
 *
 * if offset == 0,
 * Find and reserve free space and add the capability to the linked list
 * in pci config space */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size)
{
    Error *err = NULL;
    int ret = pci_add_capability2(pdev, cap_id, offset, size, &err);

    if (err != NULL) {
        /* Failure: report and free the detailed error from the helper. */
        assert(ret < 0);
        error_report("%s", error_get_pretty(err));
        error_free(err);
    } else {
        /* success implies a positive offset in config space */
        assert(ret > 0);
    }
    return ret;
}
/* Reserve 'size' bytes for capability cap_id (finding free space when
 * offset == 0), link it at the head of the capability list, and set up
 * the wmask/cmask/used bookkeeping.  Returns the offset, or a negative
 * errno with *errp set. */
int pci_add_capability2(PCIDevice *pdev, uint8_t cap_id,
                        uint8_t offset, uint8_t size,
                        Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        if (!offset) {
            error_setg(errp, "out of PCI config space");
            return -ENOSPC;
        }
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these. */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_bus_num(pdev->bus),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    config = pdev->config + offset;
    /* Link the new capability at the head of the list. */
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}
/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);

    if (!offset)
        return;
    /* Bypass the capability in the linked list. */
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}

/* Return the config-space offset of capability cap_id, or 0 if absent. */
uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}
/* Monitor printer for PCI devices: class description, bus address, PCI
 * ids and the currently-programmed BARs. */
static void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent)
{
    PCIDevice *d = (PCIDevice *)dev;
    const pci_class_desc *desc;
    char ctxt[64];
    PCIIORegion *r;
    int i, class;

    class = pci_get_word(d->config + PCI_CLASS_DEVICE);
    /* Consistency fix: reuse the shared get_class_desc() helper instead
     * of duplicating the description-table walk inline. */
    desc = get_class_desc(class);
    if (desc->desc) {
        snprintf(ctxt, sizeof(ctxt), "%s", desc->desc);
    } else {
        snprintf(ctxt, sizeof(ctxt), "Class %04x", class);
    }

    monitor_printf(mon, "%*sclass %s, addr %02x:%02x.%x, "
                   "pci id %04x:%04x (sub %04x:%04x)\n",
                   indent, "", ctxt, pci_bus_num(d->bus),
                   PCI_SLOT(d->devfn), PCI_FUNC(d->devfn),
                   pci_get_word(d->config + PCI_VENDOR_ID),
                   pci_get_word(d->config + PCI_DEVICE_ID),
                   pci_get_word(d->config + PCI_SUBSYSTEM_VENDOR_ID),
                   pci_get_word(d->config + PCI_SUBSYSTEM_ID));
    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];
        /* Only print BARs that were actually registered. */
        if (!r->size)
            continue;
        monitor_printf(mon, "%*sbar %d: %s at 0x%"FMT_PCIBUS
                       " [0x%"FMT_PCIBUS"]\n",
                       indent, "",
                       i, r->type & PCI_BASE_ADDRESS_SPACE_IO ? "i/o" : "mem",
                       r->addr, r->addr + r->size - 1);
    }
}
/* Write the firmware device-path node name for 'dev' into buf (len bytes)
 * and return buf.  Falls back to "pciVVVV,DDDD" when the class table has
 * no firmware name for this class. */
static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
{
    PCIDevice *d = (PCIDevice *)dev;
    int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
    const pci_class_desc *desc;

    /* Match the class code with each entry's "ignore" bits masked out. */
    for (desc = pci_class_descriptions; desc->desc; desc++) {
        if ((class & ~desc->fw_ign_bits) ==
            (desc->class & ~desc->fw_ign_bits)) {
            break;
        }
    }

    if (desc->desc && desc->fw_name) {
        pstrcpy(buf, len, desc->fw_name);
    } else {
        snprintf(buf, len, "pci%04x,%04x",
                 pci_get_word(d->config + PCI_VENDOR_ID),
                 pci_get_word(d->config + PCI_DEVICE_ID));
    }
    return buf;
}
/* Build the firmware device path component "name@slot[,function]". */
static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char path[50], name[33];
    int off;

    off = snprintf(path, sizeof(path), "%s@%x",
                   pci_dev_fw_name(dev, name, sizeof name),
                   PCI_SLOT(d->devfn));
    if (PCI_FUNC(d->devfn))
        /* BUG FIX: the remaining buffer space is sizeof(path) - off; the
         * previous "sizeof(path) + off" overstated it and could let
         * snprintf write past the end of 'path'. */
        snprintf(path + off, sizeof(path) - off, ",%x", PCI_FUNC(d->devfn));

    return g_strdup(path);
}
static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device. */
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */;
    slot_depth = 0;
    for (t = d; t; t = t->bus->parent_dev) {
        ++slot_depth;
    }

    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    /* First field of the path: the root bus path. */
    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = t->bus->parent_dev) {
        p -= slot_len;
        /* Every ":SS.F" component has a fixed width; assert keeps the
         * math above honest. */
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}
/* Look up a qdev id below 'bus' and return it as a PCI device in *pdev.
 * -ENODEV if not found, -EINVAL if the id names a non-PCI device. */
static int pci_qdev_find_recursive(PCIBus *bus,
                                   const char *id, PCIDevice **pdev)
{
    DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
    if (!qdev) {
        return -ENODEV;
    }

    /* roughly check if given qdev is pci device */
    if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
        *pdev = PCI_DEVICE(qdev);
        return 0;
    }
    return -EINVAL;
}

/* Search every host bridge for a PCI device with the given qdev id. */
int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        if (tmp != -ENODEV) {
            /* Remember a more specific error (e.g. -EINVAL) but keep
             * scanning the remaining host bridges. */
            rc = tmp;
        }
    }

    return rc;
}
MemoryRegion *pci_address_space(PCIDevice *dev)
{
return dev->bus->address_space_mem;
}
MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
return dev->bus->address_space_io;
}
/* QOM class_init for TYPE_PCI_DEVICE: wire up the generic qdev
 * init/exit hooks, restrict instances to PCI buses, and install the
 * common PCI device properties. */
static void pci_device_class_init(ObjectClass *klass, void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
k->init = pci_qdev_init;
k->exit = pci_unregister_device;
k->bus_type = TYPE_PCI_BUS;
k->props = pci_props;
}
/* Return the DMA address space seen by @dev.  If the bus has an IOMMU
 * hook installed (see pci_setup_iommu), ask it; otherwise walk up
 * through parent bridges until a hook is found, falling back to the
 * global system memory address space at the root. */
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
PCIBus *bus = PCI_BUS(dev->bus);
if (bus->iommu_fn) {
return bus->iommu_fn(bus, bus->iommu_opaque, dev->devfn);
}
if (bus->parent_dev) {
/** We are ignoring the bus master DMA bit of the bridge
* as it would complicate things such as VFIO for no good reason */
return pci_device_iommu_address_space(bus->parent_dev);
}
return &address_space_memory;
}
/* Install an IOMMU resolution hook on @bus; @fn maps a devfn to the
 * address space its DMA should use, @opaque is passed back to @fn. */
void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque)
{
bus->iommu_fn = fn;
bus->iommu_opaque = opaque;
}
/*
 * pci_for_each_device_under_bus() callback: extend *opaque (a Range)
 * to cover every enabled 64-bit memory window of @dev that lies above
 * 4GiB -- both the prefetchable window of bridges and each 64-bit
 * memory BAR.  Used by pci_bus_get_w64_range().
 */
static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
{
    Range *range = opaque;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
    uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
    int i;

    /* BARs are only decoded while memory space is enabled. */
    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return;
    }

    if (pc->is_bridge) {
        pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
        pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

        /* Only the part of the window above 4GiB is of interest. */
        base = MAX(base, 0x1ULL << 32);

        if (limit >= base) {
            Range pref_range;
            pref_range.begin = base;
            pref_range.end = limit + 1;
            range_extend(range, &pref_range);
        }
    }

    for (i = 0; i < PCI_NUM_REGIONS; ++i) {
        PCIIORegion *r = &dev->io_regions[i];
        Range region_range;

        /* Skip empty, I/O-space and 32-bit-only regions. */
        if (!r->size ||
            (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
            !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
            continue;
        }
        region_range.begin = pci_bar_address(dev, i, r->type, r->size);
        region_range.end = region_range.begin + r->size;

        if (region_range.begin == PCI_BAR_UNMAPPED) {
            continue;
        }

        /* Clamp to the 64-bit window above 4GiB. */
        region_range.begin = MAX(region_range.begin, 0x1ULL << 32);

        if (region_range.end - 1 >= region_range.begin) {
            /* BUG FIX: "&region_range" had been corrupted into
             * "®ion_range" (an HTML "&reg;" entity swallowed the
             * ampersand and the leading "reg"), which does not compile. */
            range_extend(range, &region_range);
        }
    }
}
/* Compute the union of all >4GiB 64-bit memory windows programmed on
 * devices under @bus, storing the result in *@range (empty if none). */
void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
range->begin = range->end = 0;
pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}
/* QOM type descriptor for the abstract base type of all PCI devices. */
static const TypeInfo pci_device_type_info = {
.name = TYPE_PCI_DEVICE,
.parent = TYPE_DEVICE,
.instance_size = sizeof(PCIDevice),
.abstract = true,
.class_size = sizeof(PCIDeviceClass),
.class_init = pci_device_class_init,
};
/* Register the PCI bus, PCIe bus and PCI device QOM types at startup. */
static void pci_register_types(void)
{
type_register_static(&pci_bus_info);
type_register_static(&pcie_bus_info);
type_register_static(&pci_device_type_info);
}
type_init(pci_register_types)
| {
"pile_set_name": "Github"
} |
/dts-v1/;
/include/ "bcm6368.dtsi"
/ {
model = "Huawei HG655b";
compatible = "huawei,hg655b", "brcm,bcm6368";
/* Front-panel buttons, polled every 20 ms with 60 ms debounce.
 * The trailing "1" in each gpios cell marks the line active-low.
 * linux,code values: 0x211 = KEY_WPS_BUTTON, 0xee = KEY_WLAN,
 * 0x198 = KEY_RESTART (assumed from input-event-codes; verify). */
gpio-keys-polled {
compatible = "gpio-keys-polled";
#address-cells = <1>;
#size-cells = <0>;
poll-interval = <20>;
debounce-interval = <60>;
wps {
label = "wps";
gpios = <&gpio0 12 1>;
linux,code = <0x211>;
};
wlan {
label = "wlan";
gpios = <&gpio0 23 1>;
linux,code = <0xee>;
};
reset {
label = "reset";
gpios = <&gpio1 2 1>;
linux,code = <0x198>;
};
};
/* Front-panel LEDs, all active-low; only "power" is lit at boot. */
gpio-leds {
compatible = "gpio-leds";
dsl_green {
label = "HW65x:green:dsl";
gpios = <&gpio0 2 1>;
};
internet_green {
label = "HW65x:green:internet";
gpios = <&gpio0 5 1>;
};
lan1_green {
label = "HW65x:green:lan1";
gpios = <&gpio0 6 1>;
};
lan2_green {
label = "HW65x:green:lan2";
gpios = <&gpio0 7 1>;
};
lan3_green {
label = "HW65x:green:lan3";
gpios = <&gpio0 8 1>;
};
lan4_green {
label = "HW65x:green:lan4";
gpios = <&gpio0 9 1>;
};
usb_green {
label = "HW65x:green:usb";
gpios = <&gpio0 14 1>;
};
power_green {
label = "HW65x:green:power";
gpios = <&gpio0 22 1>;
default-state = "on";
};
voip_green {
label = "HW65x:green:voip";
gpios = <&gpio0 25 1>;
};
wps_green {
label = "HW65x:green:wps";
gpios = <&gpio0 27 1>;
};
};
};
/* 8 MiB parallel flash partition map.
 * Layout: CFE bootloader, kernel+rootfs, board data, calibration data,
 * nvram -- contiguous, 0x000000..0x800000. */
&pflash {
	status = "ok";
	linux,part-probe = "bcm63xxpart";

	cfe@0 {
		label = "CFE";
		reg = <0x000000 0x020000>;
	};

	linux@20000 {
		label = "linux";
		reg = <0x020000 0x770000>;
	};

	board_data@790000 {
		label = "board_data";
		reg = <0x790000 0x030000>;
	};

	cal_data@7c0000 {
		label = "cal_data";
		reg = <0x7c0000 0x020000>;
		read-only;
	};

	/* BUG FIX: node was named nvram@7d0000, but its reg starts at
	 * 0x7e0000 (right after cal_data, which ends at 0x7e0000).  The
	 * unit-address must match the first reg address. */
	nvram@7e0000 {
		label = "nvram";
		reg = <0x7e0000 0x020000>;
	};
};
| {
"pile_set_name": "Github"
} |
// global variables
// Cached DOM references: the sliding <ul> track, the individual slides,
// the two navigation arrows, and the total number of slides.
const sliderView = document.querySelector('.ac-slider--view > ul'),
sliderViewSlides = document.querySelectorAll('.ac-slider--view__slides'),
arrowLeft = document.querySelector('.ac-slider--arrows__left'),
arrowRight = document.querySelector('.ac-slider--arrows__right'),
sliderLength = sliderViewSlides.length;
// Activate the target slide and translate the track so it scrolls into view.
const slideMe = (targetSlide, activeSlide) => {
    // Swap the active marker from the old slide to the new one.
    activeSlide.classList.remove('is-active');
    targetSlide.classList.add('is-active');
    // Shift the whole track left by the target slide's offset.
    sliderView.setAttribute('style', `transform:translateX(-${targetSlide.offsetLeft}px)`);
};
// before sliding function
// Decide which slide becomes active next, then delegate to slideMe().
// Called with i == 1 by the right arrow and i == 0 by the left arrow:
// the offset is applied twice (once for currentItem, once for nextItem),
// so i == 1 targets the slide after the active one and i == 0 the slide
// before it (nth-child is 1-based while indexOf is 0-based).
const beforeSliding = i => {
let isActiveItem = document.querySelector('.ac-slider--view__slides.is-active'),
currentItem = Array.from(sliderViewSlides).indexOf(isActiveItem) + i,
nextItem = currentItem + i,
sliderViewItems = document.querySelector(`.ac-slider--view__slides:nth-child(${nextItem})`);
// if nextItem is bigger than the # of slides
// Wrap around: past the last slide -> back to the first.
if (nextItem > sliderLength) {
sliderViewItems = document.querySelector('.ac-slider--view__slides:nth-child(1)');
}
// if nextItem is 0
// Wrap around: before the first slide -> jump to the last.
if (nextItem == 0) {
sliderViewItems = document.querySelector(`.ac-slider--view__slides:nth-child(${sliderLength})`);
}
// trigger the sliding method
slideMe(sliderViewItems, isActiveItem);
}
// triggers arrows
// Right arrow advances one slide (offset 1); see beforeSliding().
arrowRight.addEventListener('click', () => beforeSliding(1));
arrowLeft.addEventListener('click', () => beforeSliding(0)); | {
"pile_set_name": "Github"
} |
# frozen_string_literal: true
require "spec_helper"
require "integration/support/server"
# Unit specs for Savon::WSDLRequest, the builder that copies global
# options onto the HTTPI::Request used to fetch a WSDL document.  Each
# group checks that an option is applied when set, and that the
# corresponding HTTPI setter is never touched otherwise.
describe Savon::WSDLRequest do
let(:globals) { Savon::GlobalOptions.new }
let(:http_request) { HTTPI::Request.new }
let(:ciphers) { OpenSSL::Cipher.ciphers }
# Build a request wired to the shared globals and HTTPI request doubles.
def new_wsdl_request
Savon::WSDLRequest.new(globals, http_request)
end
describe "#build" do
it "returns an HTTPI::Request" do
wsdl_request = Savon::WSDLRequest.new(globals)
expect(wsdl_request.build).to be_an(HTTPI::Request)
end
describe "headers" do
it "are set when specified" do
globals.headers("Proxy-Authorization" => "Basic auth")
configured_http_request = new_wsdl_request.build
expect(configured_http_request.headers["Proxy-Authorization"]).to eq("Basic auth")
end
it "are not set otherwise" do
configured_http_request = new_wsdl_request.build
expect(configured_http_request.headers).to_not include("Proxy-Authorization")
end
end
describe "proxy" do
it "is set when specified" do
globals.proxy("http://proxy.example.com")
http_request.expects(:proxy=).with("http://proxy.example.com")
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.expects(:proxy=).never
new_wsdl_request.build
end
end
describe "open timeout" do
it "is set when specified" do
globals.open_timeout(22)
http_request.expects(:open_timeout=).with(22)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.expects(:open_timeout=).never
new_wsdl_request.build
end
end
describe "read timeout" do
it "is set when specified" do
globals.read_timeout(33)
http_request.expects(:read_timeout=).with(33)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.expects(:read_timeout=).never
new_wsdl_request.build
end
end
describe "write timeout" do
  it "is set when specified" do
    globals.write_timeout(44)
    http_request.expects(:write_timeout=).with(44)
    new_wsdl_request.build
  end

  it "is not set otherwise" do
    # BUG FIX: this example previously asserted :read_timeout= is never
    # called (copy-paste from the "read timeout" group), so it could not
    # detect a write timeout leaking through when none was configured.
    http_request.expects(:write_timeout=).never
    new_wsdl_request.build
  end
end
# SSL/TLS and authentication options for the WSDL request follow the
# same pattern: applied to http_request.auth(.ssl) when set, untouched
# otherwise.
describe "ssl version" do
it "is set when specified" do
globals.ssl_version(:TLSv1)
http_request.auth.ssl.expects(:ssl_version=).with(:TLSv1)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ssl_version=).never
new_wsdl_request.build
end
end
describe "ssl min_version" do
it "is set when specified" do
globals.ssl_min_version(:TLS1_2)
http_request.auth.ssl.expects(:min_version=).with(:TLS1_2)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:min_version=).never
new_wsdl_request.build
end
end
describe "ssl max_version" do
it "is set when specified" do
globals.ssl_max_version(:TLS1_2)
http_request.auth.ssl.expects(:max_version=).with(:TLS1_2)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:max_version=).never
new_wsdl_request.build
end
end
describe "ssl verify mode" do
it "is set when specified" do
globals.ssl_verify_mode(:peer)
http_request.auth.ssl.expects(:verify_mode=).with(:peer)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:verify_mode=).never
new_wsdl_request.build
end
end
describe "ssl ciphers" do
it "is set when specified" do
globals.ssl_ciphers(ciphers)
http_request.auth.ssl.expects(:ciphers=).with(ciphers)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ciphers=).never
new_wsdl_request.build
end
end
describe "ssl cert key file" do
it "is set when specified" do
cert_key = File.expand_path("../../fixtures/ssl/client_key.pem", __FILE__)
globals.ssl_cert_key_file(cert_key)
http_request.auth.ssl.expects(:cert_key_file=).with(cert_key)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_key_file=).never
new_wsdl_request.build
end
end
describe "ssl cert key password" do
it "is set when specified" do
the_pass = "secure-password!42"
globals.ssl_cert_key_password(the_pass)
http_request.auth.ssl.expects(:cert_key_password=).with(the_pass)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_key_password=).never
new_wsdl_request.build
end
end
# End-to-end checks against real encrypted key fixtures rather than mocks.
describe "ssl encrypted cert key file" do
describe "set with an invalid decrypting password" do
it "fails when attempting to use the SSL private key" do
pass = "wrong-password"
key = File.expand_path("../../fixtures/ssl/client_encrypted_key.pem", __FILE__)
cert = File.expand_path("../../fixtures/ssl/client_encrypted_key_cert.pem", __FILE__)
globals.ssl_cert_file(cert)
globals.ssl_cert_key_password(pass)
globals.ssl_cert_key_file(key)
new_wsdl_request.build
# NOTE(review): bare raise_error with no matcher is deprecated in
# RSpec; consider pinning the expected OpenSSL error class.
expect { http_request.auth.ssl.cert_key }.to raise_error
end
end
describe "set with a valid decrypting password" do
it "handles SSL private keys properly" do
if RUBY_ENGINE == 'jruby'
pending("find out why this fails with a null pointer exception on jruby")
end
pass = "secure-password!42"
key = File.expand_path("../../fixtures/ssl/client_encrypted_key.pem", __FILE__)
cert = File.expand_path("../../fixtures/ssl/client_encrypted_key_cert.pem", __FILE__)
globals.ssl_cert_file(cert)
globals.ssl_cert_key_password(pass)
globals.ssl_cert_key_file(key)
new_wsdl_request.build
expect(http_request.auth.ssl.cert_key.to_s).to match(/BEGIN RSA PRIVATE KEY/)
end
end
end
describe "ssl cert file" do
it "is set when specified" do
cert = File.expand_path("../../fixtures/ssl/client_cert.pem", __FILE__)
globals.ssl_cert_file(cert)
http_request.auth.ssl.expects(:cert_file=).with(cert)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_file=).never
new_wsdl_request.build
end
end
describe "ssl ca cert file" do
it "is set when specified" do
ca_cert = File.expand_path("../../fixtures/ssl/client_cert.pem", __FILE__)
globals.ssl_ca_cert_file(ca_cert)
http_request.auth.ssl.expects(:ca_cert_file=).with(ca_cert)
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ca_cert_file=).never
new_wsdl_request.build
end
end
describe "basic auth" do
it "is set when specified" do
globals.basic_auth("luke", "secret")
http_request.auth.expects(:basic).with("luke", "secret")
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:basic).never
new_wsdl_request.build
end
end
describe "digest auth" do
it "is set when specified" do
globals.digest_auth("lea", "top-secret")
http_request.auth.expects(:digest).with("lea", "top-secret")
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:digest).never
new_wsdl_request.build
end
end
describe "ntlm auth" do
it "is set when specified" do
globals.ntlm("han", "super-secret")
http_request.auth.expects(:ntlm).with("han", "super-secret")
new_wsdl_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:ntlm).never
new_wsdl_request.build
end
end
end
end
# Unit specs for Savon::SOAPRequest, which configures the HTTPI request
# used for SOAP calls.  Mirrors the WSDLRequest specs and additionally
# covers cookies and the SOAPAction / Content-Type headers.
describe Savon::SOAPRequest do
let(:globals) { Savon::GlobalOptions.new }
let(:http_request) { HTTPI::Request.new }
let(:ciphers) { OpenSSL::Cipher.ciphers }
# Build a request wired to the shared globals and HTTPI request doubles.
def new_soap_request
Savon::SOAPRequest.new(globals, http_request)
end
describe "#build" do
it "returns an HTTPI::Request" do
soap_request = Savon::SOAPRequest.new(globals)
expect(soap_request.build).to be_an(HTTPI::Request)
end
describe "proxy" do
it "is set when specified" do
globals.proxy("http://proxy.example.com")
http_request.expects(:proxy=).with("http://proxy.example.com")
new_soap_request.build
end
it "is not set otherwise" do
http_request.expects(:proxy=).never
new_soap_request.build
end
end
describe "cookies" do
it "sets the given cookies" do
cookies = [HTTPI::Cookie.new("some-cookie=choc-chip; Path=/; HttpOnly")]
http_request.expects(:set_cookies).with(cookies)
new_soap_request.build(:cookies => cookies)
end
it "does not set the cookies if there are none" do
http_request.expects(:set_cookies).never
new_soap_request.build
end
end
describe "open timeout" do
it "is set when specified" do
globals.open_timeout(22)
http_request.expects(:open_timeout=).with(22)
new_soap_request.build
end
it "is not set otherwise" do
http_request.expects(:open_timeout=).never
new_soap_request.build
end
end
describe "read timeout" do
it "is set when specified" do
globals.read_timeout(33)
http_request.expects(:read_timeout=).with(33)
new_soap_request.build
end
it "is not set otherwise" do
http_request.expects(:read_timeout=).never
new_soap_request.build
end
end
describe "headers" do
it "are set when specified" do
globals.headers("X-Token" => "secret")
configured_http_request = new_soap_request.build
expect(configured_http_request.headers["X-Token"]).to eq("secret")
end
it "are not set otherwise" do
configured_http_request = new_soap_request.build
expect(configured_http_request.headers).to_not include("X-Token")
end
end
# Per-call SOAPAction handling: quoted, nil-suppressed, user-overridable.
describe "SOAPAction header" do
it "is set and wrapped in parenthesis" do
configured_http_request = new_soap_request.build(:soap_action => "findUser")
soap_action = configured_http_request.headers["SOAPAction"]
expect(soap_action).to eq(%("findUser"))
end
it "is not set when it's explicitely set to nil" do
configured_http_request = new_soap_request.build(:soap_action => nil)
expect(configured_http_request.headers).to_not include("SOAPAction")
end
it "is not set when there is already a SOAPAction value" do
globals.headers("SOAPAction" => %("authenticate"))
configured_http_request = new_soap_request.build(:soap_action => "findUser")
soap_action = configured_http_request.headers["SOAPAction"]
expect(soap_action).to eq(%("authenticate"))
end
end
# Content-Type derives from the SOAP version and encoding globals.
describe "Content-Type header" do
it "defaults to SOAP 1.1 and UTF-8" do
configured_http_request = new_soap_request.build
content_type = configured_http_request.headers["Content-Type"]
expect(content_type).to eq("text/xml;charset=UTF-8")
end
it "can be changed to SOAP 1.2 and any other encoding" do
globals.soap_version(2)
globals.encoding("ISO-8859-1")
configured_http_request = new_soap_request.build
content_type = configured_http_request.headers["Content-Type"]
expect(content_type).to eq("application/soap+xml;charset=ISO-8859-1")
end
it "is not set when there is already a Content-Type value" do
globals.headers("Content-Type" => "application/awesomeness;charset=UTF-3000")
configured_http_request = new_soap_request.build(:soap_action => "findUser")
content_type = configured_http_request.headers["Content-Type"]
expect(content_type).to eq("application/awesomeness;charset=UTF-3000")
end
end
describe "ssl version" do
it "is set when specified" do
globals.ssl_version(:TLSv1)
http_request.auth.ssl.expects(:ssl_version=).with(:TLSv1)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ssl_version=).never
new_soap_request.build
end
end
describe "ssl verify mode" do
it "is set when specified" do
globals.ssl_verify_mode(:peer)
http_request.auth.ssl.expects(:verify_mode=).with(:peer)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:verify_mode=).never
new_soap_request.build
end
end
describe "ssl ciphers" do
it "is set when specified" do
globals.ssl_ciphers(ciphers)
http_request.auth.ssl.expects(:ciphers=).with(ciphers)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ciphers=).never
new_soap_request.build
end
end
describe "ssl cert key file" do
it "is set when specified" do
cert_key = File.expand_path("../../fixtures/ssl/client_key.pem", __FILE__)
globals.ssl_cert_key_file(cert_key)
http_request.auth.ssl.expects(:cert_key_file=).with(cert_key)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_key_file=).never
new_soap_request.build
end
end
describe "ssl cert key password" do
it "is set when specified" do
the_pass = "secure-password!42"
globals.ssl_cert_key_password(the_pass)
http_request.auth.ssl.expects(:cert_key_password=).with(the_pass)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_key_password=).never
new_soap_request.build
end
end
describe "ssl cert file" do
it "is set when specified" do
cert = File.expand_path("../../fixtures/ssl/client_cert.pem", __FILE__)
globals.ssl_cert_file(cert)
http_request.auth.ssl.expects(:cert_file=).with(cert)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:cert_file=).never
new_soap_request.build
end
end
describe "ssl ca cert file" do
it "is set when specified" do
ca_cert = File.expand_path("../../fixtures/ssl/client_cert.pem", __FILE__)
globals.ssl_ca_cert_file(ca_cert)
http_request.auth.ssl.expects(:ca_cert_file=).with(ca_cert)
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.ssl.expects(:ca_cert_file=).never
new_soap_request.build
end
end
describe "basic auth" do
it "is set when specified" do
globals.basic_auth("luke", "secret")
http_request.auth.expects(:basic).with("luke", "secret")
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:basic).never
new_soap_request.build
end
end
describe "digest auth" do
it "is set when specified" do
globals.digest_auth("lea", "top-secret")
http_request.auth.expects(:digest).with("lea", "top-secret")
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:digest).never
new_soap_request.build
end
end
describe "ntlm auth" do
it "is set when specified" do
globals.ntlm("han", "super-secret")
http_request.auth.expects(:ntlm).with("han", "super-secret")
new_soap_request.build
end
it "is not set otherwise" do
http_request.auth.expects(:ntlm).never
new_soap_request.build
end
end
end
end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2010-2020 Evolveum and contributors
*
* This work is dual-licensed under the Apache License 2.0
* and European Union Public License. See LICENSE file for details.
*/
package com.evolveum.midpoint.prism.impl.query.builder;
import java.util.ArrayList;
import java.util.List;
import javax.xml.namespace.QName;
import org.apache.commons.lang.Validate;
import org.jetbrains.annotations.NotNull;
import com.evolveum.midpoint.prism.*;
import com.evolveum.midpoint.prism.impl.query.*;
import com.evolveum.midpoint.prism.path.ItemPath;
import com.evolveum.midpoint.prism.query.*;
import com.evolveum.midpoint.prism.query.builder.*;
import com.evolveum.midpoint.util.annotation.Experimental;
/**
* EXPERIMENTAL IMPLEMENTATION.
*
* @author mederly
*/
// FIXME: Add better names
@Experimental
public class R_Filter implements S_FilterEntryOrEmpty, S_AtomicFilterExit {
// Builder state. The builder is immutable: every fluent call returns a
// new R_Filter, so all fields are final.
private final QueryBuilder queryBuilder;
private final Class<? extends Containerable> currentClass; // object we are working on (changes on Exists filter)
private final OrFilter currentFilter;
// Pending AND/OR between the previous atomic filter and the next one.
private final LogicalSymbol lastLogicalSymbol;
private final boolean isNegated;
// Enclosing builder when inside block()/type()/exists(); null at top level.
private final R_Filter parentFilter;
private final QName typeRestriction;
private final ItemPath existsRestriction;
private final List<ObjectOrdering> orderingList;
private final List<ObjectGrouping> groupingList;
private final Integer offset;
private final Integer maxSize;
/** Creates an empty top-level filter builder for the query class of {@code queryBuilder}. */
public R_Filter(QueryBuilder queryBuilder) {
this.queryBuilder = queryBuilder;
this.currentClass = queryBuilder.getQueryClass();
this.currentFilter = OrFilterImpl.createOr();
this.lastLogicalSymbol = null;
this.isNegated = false;
this.parentFilter = null;
this.typeRestriction = null;
this.existsRestriction = null;
this.orderingList = new ArrayList<>();
this.groupingList = new ArrayList<>();
this.offset = null;
this.maxSize = null;
}
/**
 * Copy-style constructor used by all fluent methods to produce the next
 * immutable builder state. Null ordering/grouping lists are replaced by
 * empty ones.
 */
private R_Filter(QueryBuilder queryBuilder, Class<? extends Containerable> currentClass, OrFilter currentFilter, LogicalSymbol lastLogicalSymbol,
boolean isNegated, R_Filter parentFilter, QName typeRestriction, ItemPath existsRestriction, List<ObjectOrdering> orderingList, List<ObjectGrouping> groupingList, Integer offset, Integer maxSize) {
this.queryBuilder = queryBuilder;
this.currentClass = currentClass;
this.currentFilter = currentFilter;
this.lastLogicalSymbol = lastLogicalSymbol;
this.isNegated = isNegated;
this.parentFilter = parentFilter;
this.typeRestriction = typeRestriction;
this.existsRestriction = existsRestriction;
if (orderingList != null) {
this.orderingList = orderingList;
} else {
this.orderingList = new ArrayList<>();
}
if (groupingList != null) {
this.groupingList = groupingList;
} else {
this.groupingList = new ArrayList<>();
}
this.offset = offset;
this.maxSize = maxSize;
}
/** Static factory: entry point used by QueryBuilder to start a filter chain. */
public static S_FilterEntryOrEmpty create(QueryBuilder builder) {
return new R_Filter(builder);
}
// subfilter might be null
/**
 * Appends {@code subfilter} to the filter being built, honouring any
 * pending negation and AND/OR symbol. When this builder carries a type
 * or exists restriction, the subfilter is wrapped accordingly and
 * delegated to the parent builder instead.
 */
R_Filter addSubfilter(ObjectFilter subfilter) {
if (!currentFilter.isEmpty() && lastLogicalSymbol == null) {
throw new IllegalStateException("lastLogicalSymbol is empty but there is already some filter present: " + currentFilter);
}
if (typeRestriction != null && existsRestriction != null) {
throw new IllegalStateException("Both type and exists restrictions present");
}
if (typeRestriction != null) {
if (!currentFilter.isEmpty()) {
throw new IllegalStateException("Type restriction with 2 filters?");
}
if (isNegated) {
subfilter = NotFilterImpl.createNot(subfilter);
}
// Wrap in a type filter and hand over to the enclosing builder.
return parentFilter.addSubfilter(TypeFilterImpl.createType(typeRestriction, subfilter));
} else if (existsRestriction != null) {
if (!currentFilter.isEmpty()) {
throw new IllegalStateException("Exists restriction with 2 filters?");
}
if (isNegated) {
subfilter = NotFilterImpl.createNot(subfilter);
}
// Wrap in an exists filter rooted at the parent's item class.
return parentFilter.addSubfilter(
ExistsFilterImpl.createExists(
existsRestriction,
parentFilter.currentClass,
queryBuilder.getPrismContext(),
subfilter));
} else {
OrFilter newFilter = appendAtomicFilter(subfilter, isNegated, lastLogicalSymbol);
return new R_Filter(queryBuilder, currentClass, newFilter, null, false, parentFilter, typeRestriction, existsRestriction, orderingList, groupingList, offset, maxSize);
}
}
/**
 * Adds one atomic filter to a clone of the current OR-of-ANDs structure:
 * OR (or no symbol) starts a new AND branch, AND appends to the last one.
 */
private OrFilter appendAtomicFilter(ObjectFilter subfilter, boolean negated, LogicalSymbol logicalSymbol) {
if (negated) {
subfilter = NotFilterImpl.createNot(subfilter);
}
OrFilter updatedFilter = currentFilter.clone();
if (logicalSymbol == null || logicalSymbol == LogicalSymbol.OR) {
updatedFilter.addCondition(AndFilterImpl.createAnd(subfilter));
} else if (logicalSymbol == LogicalSymbol.AND) {
((AndFilter) updatedFilter.getLastCondition()).addCondition(subfilter);
} else {
throw new IllegalStateException("Unknown logical symbol: " + logicalSymbol);
}
return updatedFilter;
}
/** Records a pending AND/OR; rejects two symbols in a row (e.g. "and().or()"). */
private R_Filter setLastLogicalSymbol(LogicalSymbol newLogicalSymbol) {
if (this.lastLogicalSymbol != null) {
throw new IllegalStateException("Two logical symbols in a sequence");
}
return new R_Filter(queryBuilder, currentClass, currentFilter, newLogicalSymbol, isNegated, parentFilter, typeRestriction, existsRestriction, orderingList, groupingList, offset, maxSize);
}
/** Marks the next atomic filter as negated; rejects not().not(). */
private R_Filter setNegated() {
if (isNegated) {
throw new IllegalStateException("Double negation");
}
return new R_Filter(queryBuilder, currentClass, currentFilter, lastLogicalSymbol, true, parentFilter, typeRestriction, existsRestriction, orderingList, groupingList, offset, maxSize);
}
/** Appends an ordering instruction (copy-on-write on the ordering list). */
private R_Filter addOrdering(ObjectOrdering ordering) {
Validate.notNull(ordering);
List<ObjectOrdering> newList = new ArrayList<>(orderingList);
newList.add(ordering);
return new R_Filter(queryBuilder, currentClass, currentFilter, lastLogicalSymbol, isNegated, parentFilter, typeRestriction, existsRestriction, newList, groupingList, offset, maxSize);
}
/** Appends a grouping instruction (copy-on-write on the grouping list). */
private R_Filter addGrouping(ObjectGrouping grouping) {
Validate.notNull(grouping);
List<ObjectGrouping> newList = new ArrayList<>(groupingList);
newList.add(grouping);
return new R_Filter(queryBuilder, currentClass, currentFilter, lastLogicalSymbol, isNegated, parentFilter, typeRestriction, existsRestriction, orderingList, newList, offset, maxSize);
}
/** Sets the paging offset. */
private R_Filter setOffset(Integer n) {
return new R_Filter(queryBuilder, currentClass, currentFilter, lastLogicalSymbol, isNegated, parentFilter, typeRestriction, existsRestriction, orderingList, groupingList, n, maxSize);
}
/** Sets the paging maximum size. */
private R_Filter setMaxSize(Integer n) {
return new R_Filter(queryBuilder, currentClass, currentFilter, lastLogicalSymbol, isNegated, parentFilter, typeRestriction, existsRestriction, orderingList, groupingList, offset, n);
}
/** Adds a match-everything filter. */
@Override
public S_AtomicFilterExit all() {
return addSubfilter(AllFilterImpl.createAll());
}
/** Adds a match-nothing filter. */
@Override
public S_AtomicFilterExit none() {
return addSubfilter(NoneFilterImpl.createNone());
}
/** Adds an undefined (no-op) filter. */
@Override
public S_AtomicFilterExit undefined() {
return addSubfilter(UndefinedFilterImpl.createUndefined());
}
/** Adds an arbitrary pre-built filter. */
@Override
public S_AtomicFilterExit filter(ObjectFilter filter) {
return addSubfilter(filter);
}
// TODO .............................................
/** Restricts results to objects with one of the given OIDs. */
@Override
public S_AtomicFilterExit id(String... identifiers) {
return addSubfilter(InOidFilterImpl.createInOid(identifiers));
}
/** Numeric-id variant of {@link #id(String...)} (container ids). */
@Override
public S_AtomicFilterExit id(long... identifiers) {
List<String> ids = longsToStrings(identifiers);
return addSubfilter(InOidFilterImpl.createInOid(ids));
}
/** Converts the identifiers to their decimal string form, preserving order. */
private List<String> longsToStrings(long[] identifiers) {
    final List<String> result = new ArrayList<>(identifiers.length);
    for (int i = 0; i < identifiers.length; i++) {
        result.add(Long.toString(identifiers[i]));
    }
    return result;
}
/** Restricts results to objects whose owner has one of the given OIDs. */
@Override
public S_AtomicFilterExit ownerId(String... identifiers) {
return addSubfilter(InOidFilterImpl.createOwnerHasOidIn(identifiers));
}
/** Numeric-id variant of {@link #ownerId(String...)}. */
@Override
public S_AtomicFilterExit ownerId(long... identifiers) {
return addSubfilter(InOidFilterImpl.createOwnerHasOidIn(longsToStrings(identifiers)));
}
// Org-structure filters: each adds an OrgFilter with the given scope
// (ONE_LEVEL = direct children, SUBTREE = all descendants,
// ANCESTORS = parents up the hierarchy).
@Override
public S_AtomicFilterExit isDirectChildOf(PrismReferenceValue value) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(value, OrgFilter.Scope.ONE_LEVEL);
return addSubfilter(orgFilter);
}
@Override
public S_AtomicFilterExit isChildOf(PrismReferenceValue value) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(value, OrgFilter.Scope.SUBTREE);
return addSubfilter(orgFilter);
}
@Override
public S_AtomicFilterExit isParentOf(PrismReferenceValue value) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(value, OrgFilter.Scope.ANCESTORS);
return addSubfilter(orgFilter);
}
@Override
public S_AtomicFilterExit isDirectChildOf(String oid) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(oid, OrgFilter.Scope.ONE_LEVEL);
return addSubfilter(orgFilter);
}
@Override
public S_AtomicFilterExit isChildOf(String oid) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(oid, OrgFilter.Scope.SUBTREE);
return addSubfilter(orgFilter);
}
@Override
public S_AtomicFilterExit isInScopeOf(String oid, OrgFilter.Scope scope) {
return addSubfilter(OrgFilterImpl.createOrg(oid, scope));
}
@Override
public S_AtomicFilterExit isInScopeOf(PrismReferenceValue value, OrgFilter.Scope scope) {
return addSubfilter(OrgFilterImpl.createOrg(value, scope));
}
@Override
public S_AtomicFilterExit isParentOf(String oid) {
OrgFilter orgFilter = OrgFilterImpl.createOrg(oid, OrgFilter.Scope.ANCESTORS);
return addSubfilter(orgFilter);
}
/** Matches root org-structure objects (those without a parent org). */
@Override
public S_AtomicFilterExit isRoot() {
OrgFilter orgFilter = OrgFilterImpl.createRootOrg();
return addSubfilter(orgFilter);
}
/** Adds a full-text search filter over the given words. */
@Override
public S_AtomicFilterExit fullText(String... words) {
FullTextFilter fullTextFilter = FullTextFilterImpl.createFullText(words);
return addSubfilter(fullTextFilter);
}
/** Opens a parenthesized sub-filter; must be closed with endBlock(). */
@Override
public S_FilterEntryOrEmpty block() {
return new R_Filter(queryBuilder, currentClass, OrFilterImpl.createOr(), null, false, this, null, null, null, null, null, null);
}
/**
 * Opens a type-restricted sub-filter for the given compile-time class;
 * the restriction is applied when the sub-filter is closed.
 */
@Override
public S_FilterEntryOrEmpty type(Class<? extends Containerable> type) {
ComplexTypeDefinition ctd = queryBuilder.getPrismContext().getSchemaRegistry().findComplexTypeDefinitionByCompileTimeClass(type);
if (ctd == null) {
throw new IllegalArgumentException("Unknown type: " + type);
}
QName typeName = ctd.getTypeName();
if (typeName == null) {
throw new IllegalStateException("No type name for " + ctd);
}
return new R_Filter(queryBuilder, type, OrFilterImpl.createOr(), null, false, this, typeName, null, null, null, null, null);
}
/** QName variant of {@link #type(Class)}; resolves the compile-time class from the schema. */
@Override
public S_FilterEntryOrEmpty type(@NotNull QName typeName) {
ComplexTypeDefinition ctd = queryBuilder.getPrismContext().getSchemaRegistry().findComplexTypeDefinitionByType(typeName);
if (ctd == null) {
throw new IllegalArgumentException("Unknown type: " + typeName);
}
//noinspection unchecked
Class<? extends Containerable> type = (Class<? extends Containerable>) ctd.getCompileTimeClass();
if (type == null) {
throw new IllegalStateException("No compile time class for " + ctd);
}
return new R_Filter(queryBuilder, type, OrFilterImpl.createOr(), null, false, this, typeName, null, null, null, null, null);
}
/**
 * Opens an exists() sub-filter rooted at the container addressed by the
 * given path segments; the sub-filter is evaluated against that
 * container's class. Nested exists() is not supported.
 */
@Override
public S_FilterEntryOrEmpty exists(Object... components) {
if (existsRestriction != null) {
throw new IllegalStateException("Exists within exists");
}
if (components.length == 0) {
throw new IllegalArgumentException("Empty path in exists() filter is not allowed.");
}
ItemPath existsPath = ItemPath.create(components);
PrismContainerDefinition pcd = resolveItemPath(existsPath, PrismContainerDefinition.class);
//noinspection unchecked
Class<? extends Containerable> clazz = pcd.getCompileTimeClass();
if (clazz == null) {
throw new IllegalArgumentException("Item path of '" + existsPath + "' in " + currentClass + " does not point to a valid prism container.");
}
return new R_Filter(queryBuilder, clazz, OrFilterImpl.createOr(), null, false, this, null, existsPath, null, null, null, null);
}
/**
 * Resolves {@code itemPath} against the schema definition of the current
 * class, requiring the result to be of definition type {@code type}.
 * Throws IllegalArgumentException when the class or the item is unknown.
 */
private <ID extends ItemDefinition> ID resolveItemPath(ItemPath itemPath, Class<ID> type) {
Validate.notNull(type, "type");
ComplexTypeDefinition ctd = queryBuilder.getPrismContext().getSchemaRegistry().findComplexTypeDefinitionByCompileTimeClass(currentClass);
if (ctd == null) {
throw new IllegalArgumentException("Definition for " + currentClass + " couldn't be found.");
}
ID definition = ctd.findItemDefinition(itemPath, type);
if (definition == null) {
throw new IllegalArgumentException("Item path of '" + itemPath + "' in " + currentClass + " does not point to a valid " + type.getSimpleName());
}
return definition;
}
// END OF TODO .............................................
/** Conjoins the next filter with AND. */
@Override
public S_FilterEntry and() {
return setLastLogicalSymbol(LogicalSymbol.AND);
}
/** Conjoins the next filter with OR. */
@Override
public S_FilterEntry or() {
return setLastLogicalSymbol(LogicalSymbol.OR);
}
/** Negates the next atomic filter. */
@Override
public S_AtomicFilterEntry not() {
return setNegated();
}
@Override
public S_ConditionEntry item(QName... names) {
return item(ItemPath.create((Object[]) names));
}
@Override
public S_ConditionEntry item(String... names) {
return item(ItemPath.create((Object[]) names));
}
@Override
public S_ConditionEntry item(ItemPath itemPath) {
    // Resolve the item's definition from the current class, then delegate.
    ItemDefinition itemDefinition = resolveItemPath(getPrismContext().toUniformPath(itemPath), ItemDefinition.class);
    return item(itemPath, itemDefinition);
}
@Override
public S_ConditionEntry itemWithDef(ItemDefinition itemDefinition, QName... names) {
    // Caller supplies the definition explicitly; only the path is constructed here.
    ItemPath itemPath = ItemPath.create((Object[]) names);
    return item(itemPath, itemDefinition);
}
@Override
public S_ConditionEntry item(ItemPath itemPath, ItemDefinition itemDefinition) {
    // Use the supplied definition when given; otherwise fall back to the
    // overload that resolves the definition from the current class.
    return itemDefinition != null
            ? R_AtomicFilter.create(getPrismContext().toUniformPath(itemPath), itemDefinition, this)
            : item(itemPath);
}
@Override
public S_ConditionEntry item(PrismContainerDefinition containerDefinition, QName... names) {
    // Convenience overload: build the path from name segments, then delegate.
    return item(containerDefinition, ItemPath.create((Object[]) names));
}
@Override
public S_ConditionEntry item(PrismContainerDefinition containerDefinition, ItemPath itemPath) {
    // Resolve the item definition within the given container (not the current class).
    ItemDefinition itemDefinition = containerDefinition.findItemDefinition(itemPath);
    if (itemDefinition == null) {
        throw new IllegalArgumentException("No definition of " + itemPath + " in " + containerDefinition);
    }
    return item(itemPath, itemDefinition);
}
@Override
public S_MatchingRuleEntry itemAs(PrismProperty<?> property) {
    // Shortcut: equality condition on the property's own path, definition and value.
    return item(property.getPath(), property.getDefinition()).eq(property);
}
@Override
public S_AtomicFilterExit endBlock() {
    // Closes the current parenthesized block and folds its (simplified) filter
    // into the parent builder. Order of the checks below is significant.
    if (parentFilter == null) {
        throw new IllegalStateException("endBlock() call without preceding block() one");
    }
    if (hasRestriction()) {
        return addSubfilter(null).endBlock(); // finish if this is open 'type' or 'exists' filter
    }
    if (currentFilter != null || parentFilter.hasRestriction()) {
        ObjectFilter simplified = simplify(currentFilter);
        // Even a null (empty) simplified filter must be propagated when the
        // parent holds an open type/exists restriction awaiting a subfilter.
        if (simplified != null || parentFilter.hasRestriction()) {
            return parentFilter.addSubfilter(simplified);
        }
    }
    return parentFilter;
}
// True when this builder level has an open exists() or type() restriction.
private boolean hasRestriction() {
    return existsRestriction != null || typeRestriction != null;
}
@Override
public S_FilterExit asc(QName... names) {
    // Add an ascending ordering on the path built from the given names.
    if (names.length == 0) {
        throw new IllegalArgumentException("There must be at least one name for asc(...) ordering");
    }
    return addOrdering(ObjectOrderingImpl.createOrdering(ItemPath.create((Object[]) names), OrderDirection.ASCENDING));
}
@Override
public S_FilterExit asc(ItemPath path) {
    // Add an ascending ordering on the given (non-empty) path.
    if (ItemPath.isEmpty(path)) {
        throw new IllegalArgumentException("There must be non-empty path for asc(...) ordering");
    }
    return addOrdering(ObjectOrderingImpl.createOrdering(path, OrderDirection.ASCENDING));
}
@Override
public S_FilterExit desc(QName... names) {
    // Add a descending ordering on the path built from the given names.
    if (names.length == 0) {
        // Fixed copy-paste bug: the message previously referred to asc(...).
        throw new IllegalArgumentException("There must be at least one name for desc(...) ordering");
    }
    return addOrdering(ObjectOrderingImpl.createOrdering(ItemPath.create((Object[]) names), OrderDirection.DESCENDING));
}
@Override
public S_FilterExit desc(ItemPath path) {
    // Add a descending ordering on the given (non-empty) path.
    if (ItemPath.isEmpty(path)) {
        throw new IllegalArgumentException("There must be non-empty path for desc(...) ordering");
    }
    return addOrdering(ObjectOrderingImpl.createOrdering(path, OrderDirection.DESCENDING));
}
@Override
public S_FilterExit group(QName... names) {
    // Add a grouping on the path built from the given names.
    if (names.length == 0) {
        // Message aligned with the method's actual name (was "uniq(...)").
        throw new IllegalArgumentException("There must be at least one name for group(...) grouping");
    }
    return addGrouping(ObjectGroupingImpl.createGrouping(ItemPath.create((Object[]) names)));
}
@Override
public S_FilterExit group(ItemPath path) {
    // Add a grouping on the given (non-empty) path.
    if (ItemPath.isEmpty(path)) {
        // Message aligned with the method's actual name (was "uniq(...)").
        throw new IllegalArgumentException("There must be non-empty path for group(...) grouping");
    }
    return addGrouping(ObjectGroupingImpl.createGrouping(path));
}
@Override
public S_FilterExit offset(Integer n) {
    // Set the paging offset (null clears it).
    return setOffset(n);
}
@Override
public S_FilterExit maxSize(Integer n) {
    // Set the paging page size (null clears it).
    return setMaxSize(n);
}
/**
 * Finishes the builder and produces the query: closes any dangling
 * type()/exists() restriction, verifies that all blocks were closed,
 * assembles paging (ordering, offset, max size) and the simplified filter.
 */
@Override
public ObjectQuery build() {
    if (typeRestriction != null || existsRestriction != null) {
        // unfinished empty type restriction or exists restriction
        return addSubfilter(null).build();
    }
    if (parentFilter != null) {
        throw new IllegalStateException("A block in filter definition was probably not closed.");
    }
    // Paging is created lazily, only if at least one paging-related
    // property was actually set.
    ObjectPaging paging = null;
    if (!orderingList.isEmpty()) {
        paging = createIfNeeded(null);
        paging.setOrdering(orderingList);
    }
    if (offset != null) {
        paging = createIfNeeded(paging);
        paging.setOffset(offset);
    }
    if (maxSize != null) {
        paging = createIfNeeded(paging);
        paging.setMaxSize(maxSize);
    }
    return ObjectQueryImpl.createObjectQuery(simplify(currentFilter), paging);
}
// Lazily instantiate a paging object the first time it is needed.
private ObjectPaging createIfNeeded(ObjectPaging paging) {
    if (paging == null) {
        paging = ObjectPagingImpl.createEmptyPaging();
    }
    return paging;
}
@Override
public ObjectFilter buildFilter() {
    // Build the full query but return only its filter part.
    return build().getFilter();
}
/**
 * Simplifies the internal OR-of-ANDs representation:
 * single-condition conjunctions are unwrapped, then a disjunction with
 * zero conditions becomes null (no filter) and a disjunction with one
 * condition is unwrapped to that condition.
 */
private ObjectFilter simplify(OrFilter filter) {
    if (filter == null) {
        return null;
    }
    OrFilter simplified = OrFilterImpl.createOr();
    // step 1 - simplify conjunctions
    // NOTE(review): every condition is assumed to be an AndFilter here — the
    // builder only ever adds conjunctions; confirm before changing that invariant.
    for (ObjectFilter condition : filter.getConditions()) {
        AndFilter conjunction = (AndFilter) condition;
        if (conjunction.getConditions().size() == 1) {
            simplified.addCondition(conjunction.getLastCondition());
        } else {
            simplified.addCondition(conjunction);
        }
    }
    // step 2 - simplify disjunction
    if (simplified.getConditions().size() == 0) {
        return null;
    } else if (simplified.getConditions().size() == 1) {
        return simplified.getLastCondition();
    } else {
        return simplified;
    }
}
// Convenience accessor for the prism context held by the owning query builder.
public PrismContext getPrismContext() {
    return queryBuilder.getPrismContext();
}
}
| {
"pile_set_name": "Github"
} |
Copyright (C) 1992, 1997-2002, 2004-2020 Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved.
This is GNU grep, the "fastest grep in the west" (we hope). All
bugs reported in previous releases have been fixed. Many exciting new
bugs have probably been introduced in this revision.
GNU grep is provided "as is" with no warranty. The exact terms
under which you may use and (re)distribute this program are detailed
in the GNU General Public License, in the file COPYING.
GNU grep is based on a fast lazy-state deterministic matcher (about
twice as fast as stock Unix egrep) hybridized with a Boyer-Moore-Gosper
search for a fixed string that eliminates impossible text from being
considered by the full regexp matcher without necessarily having to
look at every character. The result is typically many times faster
than Unix grep or egrep. (Regular expressions containing back-references
will run more slowly, however.)
See the files AUTHORS and THANKS for a list of authors and other contributors.
See the file INSTALL for compilation and installation instructions.
If there is no INSTALL file, this copy of the source code is intended
for expert hackers; please see the file README-hacking.
See the file NEWS for a description of major changes in this release.
See the file TODO for ideas on how you could help us improve grep.
See the file README-alpha for information on grep development and the CVS
repository.
Send bug reports to [email protected].
KNOWN BUGS:
Several tests in fmbtest.sh and foad1.sh fail under the cs_CZ.UTF-8 locale
and have been disabled.
The combination of -o and -i options is broken and the known failing cases
are disabled in foad1.sh.
The option -i does not work properly in some multibyte locales such as
tr_TR.UTF-8 where the upper case and lower case forms of a character are not
necessarily of the same byte length.
A list of outstanding and resolved bugs can be found at:
https://debbugs.gnu.org/cgi/pkgreport.cgi?package=grep
You can also browse the bug-grep mailing list archive at:
https://lists.gnu.org/r/bug-grep/
For any copyright year range specified as YYYY-ZZZZ in this package
note that the range specifies every single year in that closed interval.
| {
"pile_set_name": "Github"
} |
// std::unique_lock implementation -*- C++ -*-
// Copyright (C) 2008-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/unique_lock.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{mutex}
*/
#ifndef _GLIBCXX_UNIQUE_LOCK_H
#define _GLIBCXX_UNIQUE_LOCK_H 1
#pragma GCC system_header
#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else
#include <chrono>
#include <bits/move.h> // for std::swap
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/** @brief A movable scoped lock type.
*
* A unique_lock controls mutex ownership within a scope. Ownership of the
* mutex can be delayed until after construction and can be transferred
* to another unique_lock by move construction or move assignment. If a
* mutex lock is owned when the destructor runs ownership will be released.
*
* @ingroup mutexes
*/
template<typename _Mutex>
  class unique_lock
  {
  public:
    typedef _Mutex mutex_type;

    // Default construct: associated with no mutex, owns nothing.
    unique_lock() noexcept
    : _M_device(0), _M_owns(false)
    { }

    // Lock __m immediately (blocking); the lock is owned afterwards.
    explicit unique_lock(mutex_type& __m)
    : _M_device(std::__addressof(__m)), _M_owns(false)
    {
      lock();
      _M_owns = true; // NOTE(review): redundant — lock() already set this.
    }

    // Associate with __m but do not lock it yet (caller will lock later).
    unique_lock(mutex_type& __m, defer_lock_t) noexcept
    : _M_device(std::__addressof(__m)), _M_owns(false)
    { }

    // Attempt a non-blocking lock; owns_lock() reports whether it succeeded.
    unique_lock(mutex_type& __m, try_to_lock_t)
    : _M_device(std::__addressof(__m)), _M_owns(_M_device->try_lock())
    { }

    // Adopt a mutex the calling thread has already locked.
    unique_lock(mutex_type& __m, adopt_lock_t) noexcept
    : _M_device(std::__addressof(__m)), _M_owns(true)
    {
      // XXX calling thread owns mutex
    }

    // Try to acquire the lock until the absolute timeout __atime.
    template<typename _Clock, typename _Duration>
      unique_lock(mutex_type& __m,
                  const chrono::time_point<_Clock, _Duration>& __atime)
      : _M_device(std::__addressof(__m)),
        _M_owns(_M_device->try_lock_until(__atime))
      { }

    // Try to acquire the lock for the relative duration __rtime.
    template<typename _Rep, typename _Period>
      unique_lock(mutex_type& __m,
                  const chrono::duration<_Rep, _Period>& __rtime)
      : _M_device(std::__addressof(__m)),
        _M_owns(_M_device->try_lock_for(__rtime))
      { }

    // Release the mutex on destruction, but only if this object owns it.
    ~unique_lock()
    {
      if (_M_owns)
        unlock();
    }

    // Non-copyable: ownership is exclusive.
    unique_lock(const unique_lock&) = delete;
    unique_lock& operator=(const unique_lock&) = delete;

    // Move construction transfers ownership and leaves __u empty.
    unique_lock(unique_lock&& __u) noexcept
    : _M_device(__u._M_device), _M_owns(__u._M_owns)
    {
      __u._M_device = 0;
      __u._M_owns = false;
    }

    // Move assignment: unlock anything currently owned, then take over
    // __u's mutex and ownership state via a temporary + swap.
    unique_lock& operator=(unique_lock&& __u) noexcept
    {
      if(_M_owns)
        unlock();

      unique_lock(std::move(__u)).swap(*this);

      // NOTE(review): __u was already emptied by the move construction
      // above, so the two assignments below are defensive no-ops.
      __u._M_device = 0;
      __u._M_owns = false;

      return *this;
    }

    // Blocking lock. Throws operation_not_permitted if there is no mutex,
    // resource_deadlock_would_occur if this object already owns it.
    void
    lock()
    {
      if (!_M_device)
        __throw_system_error(int(errc::operation_not_permitted));
      else if (_M_owns)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      else
        {
          _M_device->lock();
          _M_owns = true;
        }
    }

    // Non-blocking lock attempt; returns whether the lock was acquired.
    bool
    try_lock()
    {
      if (!_M_device)
        __throw_system_error(int(errc::operation_not_permitted));
      else if (_M_owns)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      else
        {
          _M_owns = _M_device->try_lock();
          return _M_owns;
        }
    }

    // Timed lock attempt against an absolute time point.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock_until(__atime);
            return _M_owns;
          }
      }

    // Timed lock attempt against a relative duration.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock_for(__rtime);
            return _M_owns;
          }
      }

    // Unlock; throws operation_not_permitted if the lock is not owned.
    void
    unlock()
    {
      if (!_M_owns)
        __throw_system_error(int(errc::operation_not_permitted));
      else if (_M_device)
        {
          _M_device->unlock();
          _M_owns = false;
        }
    }

    // Exchange state (mutex pointer and ownership flag) with __u.
    void
    swap(unique_lock& __u) noexcept
    {
      std::swap(_M_device, __u._M_device);
      std::swap(_M_owns, __u._M_owns);
    }

    // Disassociate from the mutex WITHOUT unlocking it; returns the mutex.
    mutex_type*
    release() noexcept
    {
      mutex_type* __ret = _M_device;
      _M_device = 0;
      _M_owns = false;
      return __ret;
    }

    // True if this object currently owns a lock on its mutex.
    bool
    owns_lock() const noexcept
    { return _M_owns; }

    explicit operator bool() const noexcept
    { return owns_lock(); }

    // The associated mutex (may be null), regardless of ownership.
    mutex_type*
    mutex() const noexcept
    { return _M_device; }

  private:
    mutex_type*	_M_device;  // associated mutex, or null
    bool		_M_owns;    // whether *this owns a lock on _M_device
  };
/// Swap overload for unique_lock objects.
/// Delegates to the member swap; never locks or unlocks either mutex.
/// @relates unique_lock
template<typename _Mutex>
  inline void
  swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
  { __x.swap(__y); }
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // C++11
#endif // _GLIBCXX_UNIQUE_LOCK_H
| {
"pile_set_name": "Github"
} |
// Copyright 2018 The Cluster Monitoring Operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks
import (
"github.com/openshift/cluster-monitoring-operator/pkg/client"
"github.com/openshift/cluster-monitoring-operator/pkg/manifests"
"github.com/pkg/errors"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog"
)
// ClusterMonitoringOperatorTask reconciles the Kubernetes resources that
// belong to the cluster-monitoring-operator itself (service, RBAC roles,
// service monitor, GRPC TLS secret).
type ClusterMonitoringOperatorTask struct {
	client  *client.Client      // API client used for create-or-update calls
	factory *manifests.Factory  // source of the manifest objects to reconcile
}
// NewClusterMonitoringOperatorTask returns a task that reconciles the
// cluster-monitoring-operator's own resources using the given client and
// manifest factory.
func NewClusterMonitoringOperatorTask(client *client.Client, factory *manifests.Factory) *ClusterMonitoringOperatorTask {
	task := ClusterMonitoringOperatorTask{}
	task.client = client
	task.factory = factory
	return &task
}
// Run reconciles, in order: the operator Service, a fixed set of
// ClusterRoles, the user-workload config Role, the operator ServiceMonitor,
// and finally the GRPC TLS secret (loading an existing one if present,
// rotating it, and writing it back). The first error aborts the sequence.
func (t *ClusterMonitoringOperatorTask) Run() error {
	svc, err := t.factory.ClusterMonitoringOperatorService()
	if err != nil {
		return errors.Wrap(err, "initializing Cluster Monitoring Operator Service failed")
	}

	err = t.client.CreateOrUpdateService(svc)
	if err != nil {
		return errors.Wrap(err, "reconciling Cluster Monitoring Operator Service failed")
	}

	// Reconcile each named ClusterRole produced by its factory function.
	for name, crf := range map[string]func() (*rbacv1.ClusterRole, error){
		"cluster-monitoring-view": t.factory.ClusterMonitoringClusterRole,
		"monitoring-rules-edit":   t.factory.ClusterMonitoringRulesEditClusterRole,
		"monitoring-rules-view":   t.factory.ClusterMonitoringRulesViewClusterRole,
		"monitoring-edit":         t.factory.ClusterMonitoringEditClusterRole,
	} {
		cr, err := crf()
		if err != nil {
			return errors.Wrapf(err, "initializing %s ClusterRole failed", name)
		}

		err = t.client.CreateOrUpdateClusterRole(cr)
		if err != nil {
			return errors.Wrapf(err, "reconciling %s ClusterRole failed", name)
		}
	}

	uwcr, err := t.factory.ClusterMonitoringEditUserWorkloadConfigRole()
	if err != nil {
		return errors.Wrap(err, "initializing UserWorkloadConfigEdit Role failed")
	}

	err = t.client.CreateOrUpdateRole(uwcr)
	if err != nil {
		return errors.Wrap(err, "reconciling UserWorkloadConfigEdit Role failed")
	}

	smcmo, err := t.factory.ClusterMonitoringOperatorServiceMonitor()
	if err != nil {
		return errors.Wrap(err, "initializing Cluster Monitoring Operator ServiceMonitor failed")
	}

	err = t.client.CreateOrUpdateServiceMonitor(smcmo)
	if err != nil {
		return errors.Wrap(err, "reconciling Cluster Monitoring Operator ServiceMonitor failed")
	}

	s, err := t.factory.GRPCSecret()
	if err != nil {
		return errors.Wrap(err, "error initializing Cluster Monitoring Operator GRPC TLS secret")
	}

	// Prefer the secret already stored in the cluster; fall back to the
	// default empty manifest only when none exists yet.
	loaded, err := t.client.GetSecret(s.Namespace, s.Name)
	switch {
	case apierrors.IsNotFound(err):
		// No secret was found, proceed with the default empty secret from manifests.
		klog.V(5).Info("creating new Cluster Monitoring Operator GRPC TLS secret")
	case err == nil:
		// Secret was found, use that.
		s = loaded
		klog.V(5).Info("found existing Cluster Monitoring Operator GRPC TLS secret")
	default:
		return errors.Wrap(err, "error reading Cluster Monitoring Operator GRPC TLS secret")
	}

	err = manifests.RotateGRPCSecret(s)
	if err != nil {
		return errors.Wrap(err, "error rotating Cluster Monitoring Operator GRPC TLS secret")
	}

	err = t.client.CreateOrUpdateSecret(s)
	if err != nil {
		return errors.Wrap(err, "error creating Cluster Monitoring Operator GRPC TLS secret")
	}

	return nil
}
| {
"pile_set_name": "Github"
} |
'use strict';
module.exports = {
up: function(queryInterface, Sequelize) {
return queryInterface.addColumn('Users', 'longDescription', {
type: Sequelize.TEXT,
});
},
down: function(queryInterface) {
return queryInterface.removeColumn('Users', 'longDescription');
},
};
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_202) on Fri Jul 24 11:53:40 GMT 2020 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class org.eclipse.rdf4j.sail.inferencer.fc.config.CustomGraphQueryInferencerSchema (Eclipse RDF4J 3.3.0 API)</title>
<meta name="date" content="2020-07-24">
<link rel="stylesheet" type="text/css" href="../../../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.eclipse.rdf4j.sail.inferencer.fc.config.CustomGraphQueryInferencerSchema (Eclipse RDF4J 3.3.0 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../../org/eclipse/rdf4j/sail/inferencer/fc/config/CustomGraphQueryInferencerSchema.html" title="class in org.eclipse.rdf4j.sail.inferencer.fc.config">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?org/eclipse/rdf4j/sail/inferencer/fc/config/class-use/CustomGraphQueryInferencerSchema.html" target="_top">Frames</a></li>
<li><a href="CustomGraphQueryInferencerSchema.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.eclipse.rdf4j.sail.inferencer.fc.config.CustomGraphQueryInferencerSchema" class="title">Uses of Class<br>org.eclipse.rdf4j.sail.inferencer.fc.config.CustomGraphQueryInferencerSchema</h2>
</div>
<div class="classUseContainer">No usage of org.eclipse.rdf4j.sail.inferencer.fc.config.CustomGraphQueryInferencerSchema</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../../org/eclipse/rdf4j/sail/inferencer/fc/config/CustomGraphQueryInferencerSchema.html" title="class in org.eclipse.rdf4j.sail.inferencer.fc.config">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?org/eclipse/rdf4j/sail/inferencer/fc/config/class-use/CustomGraphQueryInferencerSchema.html" target="_top">Frames</a></li>
<li><a href="CustomGraphQueryInferencerSchema.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2015-2020 <a href="https://www.eclipse.org/">Eclipse Foundation</a>. All Rights Reserved.</small></p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
# File generated from CLDR ver. 25
decimalSeparator = .
groupingSeparator = ,
percent = %
zeroDigit = 0
plusSign = +
minusSign = -
exponentialSymbol = E
perMill = \u2030
infinity = \u221E
notANumber = NaN
monetarySeparator = .
monetaryGroupingSeparator = ,
decimalPattern = #,##0.###
scientificPattern = #E0
percentPattern = #,##0%
currencyPattern = \u00A4#,##0.00
simpleCurrencyPattern = \u00A4\u00A4\u00A4\u00A4#,##0.00
globalCurrencyPattern = \u00A4\u00A4\u00A4\u00A4#,##0.00 \u00A4\u00A4
defCurrencyCode = XCD
| {
"pile_set_name": "Github"
} |
120
111 112 113 114 115 116 117 118 119 120
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 9 2015 22:53:21).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2014 by Steve Nygard.
//
#import "TSizePropertyExtractor.h"
// Extractor for a "logical size" property; specializes TSizePropertyExtractor.
// NOTE(review): this is a class-dump reconstruction — the actual behavior is
// not visible here; the description is inferred from the name only.
@interface TPropertyLogicalSizeExtractor : TSizePropertyExtractor
{
}

// Factory method returning an extractor instance — TODO confirm whether it is
// shared or newly allocated.
+ (id)extractor;
@end
| {
"pile_set_name": "Github"
} |
// Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
// Copyright (C) 2008 - INRIA
// Copyright (C) 2009-2010 - DIGITEO
//
// This file is released under the 3-clause BSD license. See COPYING-BSD.
// Table of Xcos demos: column 1 is the localized demo title, column 2 the
// demo script file name (made absolute below by prefixing the Scilab path).
subdemolist = [_("Basic controller") , "basic_controller.dem.sce" ;..
_("Water tank") , "demo_watertank.dem.sce" ; ..
_("Discrete Controller") , "discrete_controller.dem.sce" ;..
_("Kalman Filter") , "kalman.dem.sce" ; ..
_("Discrete Kalman Filter") , "discrete_kalman.dem.sce" ; ..
_("Cont.Plant-Hybrid Observer") , "cont_sys_disc_cont.dem.sce" ; ..
_("Temperature Controller") , "temp_controller.dem.sce" ; ..
_("Inverted pendulum") , "inverted_pendulum.dem.sce" ; ..
_("Lorenz butterfly") , "lorenz.dem.sce" ; ..
];

// Turn relative script names into absolute paths under the Scilab tree.
subdemolist(:,2) = SCI + "/modules/xcos/demos/" + subdemolist(:,2);
| {
"pile_set_name": "Github"
} |
var dir_15c6b1131136f5f64828719bf468896a =
[
[ "CmdOption.hh", "a00002_source.html", null ],
[ "DecimaterViewerWidget.hh", "a00014_source.html", null ]
]; | {
"pile_set_name": "Github"
} |
{
"$id": "https:/jenkins-x.io/tests/basicTypes.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "test values.yaml",
"type": "object",
"properties": {
"numberValue": {
"type": "number",
"multipleOf": 10
},
"integerValue": {
"type": "integer",
"multipleOf": 20
}
}
} | {
"pile_set_name": "Github"
} |
import UIKit
/// The content variants a stats widget cell can display, including the
/// failure state shown when data could not be loaded.
enum WidgetType {
    case today
    case allTime
    case thisWeek
    case loadingFailed

    /// Font for the main "configure"/status label: the failure state uses a
    /// headline font, all configuration prompts use the footnote font.
    var configureLabelFont: UIFont {
        switch self {
        case .loadingFailed:
            return WidgetStyles.headlineFont
        default:
            return WidgetStyles.footnoteNote
        }
    }
}
/// Table cell shown when a stats widget is unconfigured or failed to load;
/// displays an explanatory message and a call-to-action label.
class WidgetUnconfiguredCell: UITableViewCell {

    // MARK: - Properties

    static let reuseIdentifier = "WidgetUnconfiguredCell"

    @IBOutlet private var configureLabel: UILabel!
    @IBOutlet private var separatorLine: UIView!
    @IBOutlet private var separatorVisualEffectView: UIVisualEffectView!
    @IBOutlet private var actionLabel: UILabel!

    // Set by configure(for:); drives all text/font choices in configureView().
    private var widgetType: WidgetType?

    // MARK: - View

    /// Entry point for the hosting view: stores the widget type and
    /// refreshes the cell's labels and styling accordingly.
    func configure(for widgetType: WidgetType) {
        self.widgetType = widgetType
        configureView()
    }
}
// MARK: - Private Extension
// MARK: - Private Extension

private extension WidgetUnconfiguredCell {

    /// Applies the message text, fonts, colors and separator styling for the
    /// currently stored widget type. No-op if configure(for:) was never called.
    func configureView() {
        guard let widgetType = widgetType else {
            return
        }

        // Pick the localized message for the widget type.
        configureLabel.text = {
            switch widgetType {
            case .today:
                return LocalizedText.configureToday
            case .allTime:
                return LocalizedText.configureAllTime
            case .thisWeek:
                return LocalizedText.configureThisWeek
            case .loadingFailed:
                return LocalizedText.loadingFailed
            }
        }()

        configureLabel.font = widgetType.configureLabelFont
        // Failure state offers a retry; all other states open the app.
        actionLabel.text = widgetType == .loadingFailed ? LocalizedText.retry : LocalizedText.openWordPress
        configureLabel.textColor = WidgetStyles.primaryTextColor
        actionLabel.textColor = WidgetStyles.primaryTextColor
        WidgetStyles.configureSeparator(separatorLine)
        separatorVisualEffectView.effect = WidgetStyles.separatorVibrancyEffect
    }

    // Localized user-facing strings for every state of the cell.
    enum LocalizedText {
        static let configureToday = NSLocalizedString("Display your site stats for today here. Configure in the WordPress app in your site stats.", comment: "Unconfigured stats today widget helper text")
        static let configureAllTime = NSLocalizedString("Display your all-time site stats here. Configure in the WordPress app in your site stats.", comment: "Unconfigured stats all-time widget helper text")
        static let configureThisWeek = NSLocalizedString("Display your site stats for this week here. Configure in the WordPress app in your site stats.", comment: "Unconfigured stats this week widget helper text")
        static let openWordPress = NSLocalizedString("Open WordPress", comment: "Today widget label to launch WP app")
        static let loadingFailed = NSLocalizedString("Couldn't load data", comment: "Message displayed when a Stats widget failed to load data.")
        static let retry = NSLocalizedString("Retry", comment: "Stats widgets label to reload the widget.")
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2006-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.consol.citrus.functions.core;
import com.consol.citrus.context.TestContext;
import com.consol.citrus.exceptions.CitrusRuntimeException;
import com.consol.citrus.exceptions.InvalidFunctionUsageException;
import com.consol.citrus.functions.Function;
import org.springframework.util.CollectionUtils;
import java.util.List;
import java.util.Optional;
/**
 * Function resolving a Java system property by name. An optional second
 * parameter supplies a default value used when the property is not set;
 * without it an unset property raises a runtime exception.
 *
 * @author Christoph Deppisch
 */
public class SystemPropertyFunction implements Function {

    /**
     * Resolves the system property named by the first parameter. If the
     * property is unset, the second parameter (when present) is returned as a
     * default; otherwise a CitrusRuntimeException is raised.
     */
    @Override
    public String execute(List<String> parameterList, TestContext context) {
        if (CollectionUtils.isEmpty(parameterList)) {
            throw new InvalidFunctionUsageException("Invalid function parameters - must set system property name");
        }

        String propertyName = parameterList.get(0);

        String value = System.getProperty(propertyName);
        if (value != null) {
            return value;
        }

        // Property is unset: fall back to the optional default, or fail.
        if (parameterList.size() > 1) {
            return parameterList.get(1);
        }
        throw new CitrusRuntimeException(String.format("Failed to resolve system property '%s'", propertyName));
    }
}
| {
"pile_set_name": "Github"
} |
# Windows-only build of the luasocket library: produces the socket.core and
# mime.core DLLs plus the accompanying Lua modules.
cmake_minimum_required(VERSION 3.0.2)
project(luasocket)

# This build script links against ws2_32 and exports via __declspec, so it
# only supports Windows toolchains.
if(NOT WIN32)
message(FATAL_ERROR "Written for windows only")
endif()

# Locate the Lua headers and library the modules compile against.
find_path(LUA_INCLUDE_DIR lua.h PATH_SUFFIXES lua)
find_library(LUA_LIBRARY lua)

set(LUASOCKET_INCLUDES ${LUA_INCLUDE_DIR} src)
set(LUASOCKET_LIBRARIES ${LUA_LIBRARY} ws2_32)

# Core socket module (TCP/UDP, select, timeouts) using the winsock backend.
add_library(socket.core
src/luasocket.c
src/timeout.c
src/buffer.c
src/io.c
src/auxiliar.c
src/options.c
src/inet.c
src/except.c
src/select.c
src/tcp.c
src/udp.c
src/compat.c
src/wsocket.c)

# MIME encoding/decoding helpers module.
add_library(mime.core
src/mime.c
src/compat.c)

target_include_directories(socket.core PRIVATE ${LUASOCKET_INCLUDES})
target_link_libraries(socket.core PRIVATE ${LUASOCKET_LIBRARIES})
target_include_directories(mime.core PRIVATE ${LUASOCKET_INCLUDES})
target_link_libraries(mime.core PRIVATE ${LUASOCKET_LIBRARIES})

# Export the public API symbols from the DLLs.
add_definitions(
"-DLUASOCKET_API=__declspec(dllexport)"
"-DMIME_API=__declspec(dllexport)")

install(TARGETS socket.core
RUNTIME DESTINATION bin/socket
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib)
install(TARGETS mime.core
RUNTIME DESTINATION bin/mime
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib)

# Pure-Lua modules installed alongside the binary cores.
install(FILES
src/ltn12.lua
src/socket.lua
src/mime.lua
DESTINATION share/lua)
install(FILES
src/http.lua
src/url.lua
src/tp.lua
src/ftp.lua
src/headers.lua
src/smtp.lua
DESTINATION share/lua/socket)
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* NFS internal definitions
*/
#include "nfs4_fs.h"
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/crc32.h>
#include <linux/nfs_page.h>
#include <linux/wait_bit.h>
#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
extern const struct export_operations nfs_export_ops;
struct nfs_string;
/* Maximum number of readahead requests
* FIXME: this should really be a sysctl so that users may tune it to suit
* their needs. People that do NFS over a slow network, might for
* instance want to reduce it to something closer to 1 for improved
* interactive response.
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
/*
 * Mark fattr as a mountpoint when its fsid differs from the parent
 * superblock's fsid (i.e. crossing into a different server filesystem).
 */
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
	if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
		fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
}
/*
 * Return 1 when the mounted-on fileid should be used instead of the
 * regular fileid: the attribute must be present AND the object must be
 * either a mountpoint or an NFSv4 referral. Otherwise return 0.
 */
static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
{
	if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
	    (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
	     ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
		return 0;
	return 1;
}
/*
 * Data describing an existing NFS mount that a new submount/clone is
 * derived from (server address, export path, auth flavor, root attrs).
 */
struct nfs_clone_mount {
	const struct super_block *sb;
	const struct dentry *dentry;
	struct nfs_fh *fh;
	struct nfs_fattr *fattr;
	char *hostname;
	char *mnt_path;
	struct sockaddr *addr;
	size_t addrlen;		/* length of *addr */
	rpc_authflavor_t authflavor;
};
/*
* Note: RFC 1813 doesn't limit the number of auth flavors that
* a server can return, so make something up.
*/
#define NFS_MAX_SECFLAVORS (12)
/*
* Value used if the user did not specify a port value.
*/
#define NFS_UNSPEC_PORT (-1)
#define NFS_UNSPEC_RETRANS (UINT_MAX)
#define NFS_UNSPEC_TIMEO (UINT_MAX)
/*
* Maximum number of pages that readdir can use for creating
* a vmapped array of pages.
*/
#define NFS_MAX_READDIR_PAGES 8
/*
 * Identification data for locating or creating a struct nfs_client --
 * the argument block taken by nfs_get_client()/nfs_alloc_client()
 * (declared below under "client.c").
 */
struct nfs_client_initdata {
unsigned long init_flags;
const char *hostname; /* Hostname of the server */
const struct sockaddr *addr; /* Address of the server */
const char *nodename; /* Hostname of the client */
const char *ip_addr; /* IP address of the client */
size_t addrlen;
struct nfs_subversion *nfs_mod;
int proto;
u32 minorversion;
struct net *net;
const struct rpc_timeout *timeparms;
};
/*
* In-kernel mount arguments
*/
/* Mount parameters after text-option parsing (see comment above). */
struct nfs_parsed_mount_data {
int flags;
unsigned int rsize, wsize;
unsigned int timeo, retrans;
unsigned int acregmin, acregmax,
acdirmin, acdirmax;
unsigned int namlen;
unsigned int options;
unsigned int bsize;
struct nfs_auth_info auth_info;
rpc_authflavor_t selected_flavor;
char *client_address;
unsigned int version;
unsigned int minorversion;
char *fscache_uniq;
bool need_mount;
/* Mount service endpoint -- presumably rpc.mountd; confirm in mount_clnt.c */
struct {
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
u32 version;
int port;
unsigned short protocol;
} mount_server;
/* NFS server endpoint and the export being mounted */
struct {
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
char *export_path;
int port;
unsigned short protocol;
} nfs_server;
struct security_mnt_opts lsm_opts;
struct net *net;
};
/* mount_clnt.c */
/*
 * Arguments for a MOUNT protocol request -- consumed by nfs_mount()
 * and nfs_umount() declared below.
 */
struct nfs_mount_request {
struct sockaddr *sap;
size_t salen;
char *hostname;
char *dirpath;	/* export path to mount/unmount */
u32 version;	/* MOUNT protocol version */
unsigned short protocol;
struct nfs_fh *fh;	/* OUT: root filehandle returned by the server */
int noresvport;
unsigned int *auth_flav_len;	/* OUT: number of flavors returned */
rpc_authflavor_t *auth_flavs;	/* OUT: flavors offered by the server */
struct net *net;
};
/*
 * State carried through superblock setup at mount time; fill_super and
 * set_security are the callbacks invoked on the new superblock.
 */
struct nfs_mount_info {
void (*fill_super)(struct super_block *, struct nfs_mount_info *);
int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
struct nfs_parsed_mount_data *parsed;	/* set for a fresh mount */
struct nfs_clone_mount *cloned;	/* set for a cloned mount */
struct nfs_fh *mntfh;
};
extern int nfs_mount(struct nfs_mount_request *info);
extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern const struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
struct nfs_client *nfs_get_client(const struct nfs_client_initdata *);
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
void nfs_server_insert_lists(struct nfs_server *);
void nfs_server_remove_lists(struct nfs_server *);
void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
rpc_authflavor_t);
struct nfs_server *nfs_alloc_server(void);
void nfs_server_copy_userdata(struct nfs_server *, struct nfs_server *);
extern void nfs_cleanup_cb_ident_idr(struct net *);
extern void nfs_put_client(struct nfs_client *);
extern void nfs_free_client(struct nfs_client *);
extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
extern struct nfs_client *
nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
struct nfs4_sessionid *, u32);
extern struct nfs_server *nfs_create_server(struct nfs_mount_info *,
struct nfs_subversion *);
extern struct nfs_server *nfs4_create_server(
struct nfs_mount_info *,
struct nfs_subversion *);
extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
struct nfs_fh *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
struct sockaddr *sap, size_t salen,
struct net *net);
extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *,
rpc_authflavor_t);
extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
extern int nfs_client_init_status(const struct nfs_client *clp);
extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
unsigned int ds_retrans,
u32 minor_version);
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo,
unsigned int ds_retrans);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
extern int nfs_fs_proc_net_init(struct net *net);
extern void nfs_fs_proc_net_exit(struct net *net);
#else
/* Stubs for !CONFIG_PROC_FS: all /proc setup and teardown is a no-op. */
static inline int nfs_fs_proc_net_init(struct net *net)
{
return 0;
}
static inline void nfs_fs_proc_net_exit(struct net *net)
{
}
static inline int nfs_fs_proc_init(void)
{
return 0;
}
static inline void nfs_fs_proc_exit(void)
{
}
#endif
/* callback_xdr.c */
extern const struct svc_version nfs4_callback_version1;
extern const struct svc_version nfs4_callback_version4;
struct nfs_pageio_descriptor;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
extern void nfs_destroy_nfspagecache(void);
extern int __init nfs_init_readpagecache(void);
extern void nfs_destroy_readpagecache(void);
extern int __init nfs_init_writepagecache(void);
extern void nfs_destroy_writepagecache(void);
extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr));
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
const struct rpc_call_ops *call_ops, int how, int flags);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
/*
 * Mirroring is in effect only when the descriptor carries more than one
 * mirror; a count below 1 indicates descriptor misuse (warn once).
 */
static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc)
{
WARN_ON_ONCE(desc->pg_mirror_count < 1);
return desc->pg_mirror_count > 1;
}
/*
 * Two open contexts match when they share both the credential and the
 * open state.
 */
static inline bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}
/* nfs2xdr.c */
extern const struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
struct nfs_entry *, bool);
/* nfs3xdr.c */
extern const struct rpc_procinfo nfs3_procedures[];
extern int nfs3_decode_dirent(struct xdr_stream *,
struct nfs_entry *, bool);
/* nfs4xdr.c */
#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs4_decode_dirent(struct xdr_stream *,
struct nfs_entry *, bool);
#endif
#ifdef CONFIG_NFS_V4_1
extern const u32 nfs41_maxread_overhead;
extern const u32 nfs41_maxwrite_overhead;
extern const u32 nfs41_maxgetdevinfo_overhead;
#endif
/* nfs4proc.c */
#if IS_ENABLED(CONFIG_NFS_V4)
extern const struct rpc_procinfo nfs4_procedures[];
#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
/*
 * Copy the security label @src into @dst.
 *
 * Returns @dst on success, or NULL when either pointer is missing or
 * the source label exceeds NFS4_MAXLABELLEN. dst->label is assumed to
 * have room for src->len bytes -- TODO confirm at the call sites.
 */
static inline struct nfs4_label *
nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
{
if (!dst || !src)
return NULL;
if (src->len > NFS4_MAXLABELLEN)
return NULL;
dst->lfs = src->lfs;
dst->pi = src->pi;
dst->len = src->len;
memcpy(dst->label, src->label, src->len);
return dst;
}
/*
 * nfs4_label_free - release a label allocated by nfs4_label_alloc()
 * @label: label to free; NULL is a no-op
 *
 * Frees both the label data buffer and the container itself
 * (kfree() itself also tolerates NULL pointers).
 */
static inline void nfs4_label_free(struct nfs4_label *label)
{
	if (label) {
		kfree(label->label);
		kfree(label);
	}
}
/*
 * Invalidate the cached security label for @nfsi, but only when the
 * server actually supports security labels.
 */
static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
{
if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
}
#else
/* Stubs for !CONFIG_NFS_V4_SECURITY_LABEL: labels are never allocated. */
static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
static inline void nfs4_label_free(void *label) {}
static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
{
}
static inline struct nfs4_label *
nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
{
return NULL;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);
/* dir.c */
extern void nfs_advise_use_readdirplus(struct inode *dir);
extern void nfs_force_use_readdirplus(struct inode *dir);
extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
struct shrink_control *sc);
struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
int nfs_create(struct inode *, struct dentry *, umode_t, bool);
int nfs_mkdir(struct inode *, struct dentry *, umode_t);
int nfs_rmdir(struct inode *, struct dentry *);
int nfs_unlink(struct inode *, struct dentry *);
int nfs_symlink(struct inode *, struct dentry *, const char *);
int nfs_link(struct dentry *, struct inode *, struct dentry *);
int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
int nfs_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
/* file.c */
int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
loff_t nfs_file_llseek(struct file *, loff_t, int);
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
int nfs_file_mmap(struct file *, struct vm_area_struct *);
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
int nfs_check_flags(int);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
void nfs_zap_acl_cache(struct inode *inode);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
/* super.c */
extern const struct super_operations nfs_sops;
extern struct file_system_type nfs_fs_type;
extern struct file_system_type nfs_xdev_fs_type;
#if IS_ENABLED(CONFIG_NFS_V4)
extern struct file_system_type nfs4_xdev_fs_type;
extern struct file_system_type nfs4_referral_fs_type;
#endif
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
struct nfs_subversion *);
int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
struct nfs_mount_info *, struct nfs_subversion *);
struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *);
struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
const char *, struct nfs_mount_info *);
void nfs_kill_super(struct super_block *);
void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
extern struct rpc_stat nfs_rpcstat;
extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);
extern void nfs_start_io_write(struct inode *inode);
extern void nfs_end_io_write(struct inode *inode);
extern void nfs_start_io_direct(struct inode *inode);
extern void nfs_end_io_direct(struct inode *inode);
/*
 * Buffered I/O is in use unless the inode has been switched into
 * O_DIRECT mode (NFS_INO_ODIRECT flag set).
 */
static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
{
return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
}
/* namespace.c */
#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
char *buffer, ssize_t buflen, unsigned flags);
extern struct vfsmount *nfs_d_automount(struct path *path);
struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
/* getroot.c */
extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
const char *);
#if IS_ENABLED(CONFIG_NFS_V4)
extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
const char *);
extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool);
#endif
struct nfs_pgio_completion_ops;
/* read.c */
extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
void nfs_umount_begin(struct super_block *);
int nfs_statfs(struct dentry *, struct kstatfs *);
int nfs_show_options(struct seq_file *, struct dentry *);
int nfs_show_devname(struct seq_file *, struct dentry *);
int nfs_show_path(struct seq_file *, struct dentry *);
int nfs_show_stats(struct seq_file *, struct dentry *);
int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
/* write.c */
extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_commit_free(struct nfs_commit_data *p);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
int how, int flags);
extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo);
int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
struct nfs_commit_info *cinfo, int max);
unsigned long nfs_reqs_to_commit(struct nfs_commit_info *);
int nfs_scan_commit(struct inode *inode, struct list_head *dst,
struct nfs_commit_info *cinfo);
void nfs_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo,
u32 ds_commit_idx);
int nfs_write_need_commit(struct nfs_pgio_header *);
void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo,
u32 ds_commit_idx);
void nfs_commitdata_release(struct nfs_commit_data *data);
void nfs_request_add_commit_list(struct nfs_page *req,
struct nfs_commit_info *cinfo);
void nfs_request_add_commit_list_locked(struct nfs_page *req,
struct list_head *dst,
struct nfs_commit_info *cinfo);
void nfs_request_remove_commit_list(struct nfs_page *req,
struct nfs_commit_info *cinfo);
void nfs_init_cinfo(struct nfs_commit_info *cinfo,
struct inode *inode,
struct nfs_direct_req *dreq);
int nfs_key_timeout_notify(struct file *filp, struct inode *inode);
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode);
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
#ifdef CONFIG_NFS_V4_1
/*
 * Invalidate the direct-write verifier in every commit bucket so that a
 * later commit cannot match a stale verifier.
 */
static inline
void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
{
int i;
for (i = 0; i < cinfo->nbuckets; i++)
cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
}
#else
/* Without NFSv4.1 there are no pNFS commit buckets: no-op. */
static inline
void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
{
}
#endif
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
#endif
/*
 * Compare two write verifiers byte-wise; returns 0 when identical
 * (memcmp semantics).
 */
static inline int
nfs_write_verifier_cmp(const struct nfs_write_verifier *v1,
const struct nfs_write_verifier *v2)
{
return memcmp(v1->data, v2->data, sizeof(v1->data));
}
/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
struct dentry *old_dentry, struct dentry *new_dentry,
void (*complete)(struct rpc_task *, struct nfs_renamedata *));
extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
/* direct.c */
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq);
extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
/* nfs4proc.c */
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);
extern int nfs40_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
extern int nfs41_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
extern int nfs4_test_session_trunk(struct rpc_clnt *,
struct rpc_xprt *,
void *);
/*
 * Take a reference on @inode and mark its superblock active.
 *
 * Returns the inode on success. If the superblock cannot be activated
 * the inode reference is dropped again and NULL is returned.
 */
static inline struct inode *nfs_igrab_and_active(struct inode *inode)
{
inode = igrab(inode);
if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
iput(inode);
inode = NULL;
}
return inode;
}
/*
 * Undo nfs_igrab_and_active(): drop the inode reference and deactivate
 * the superblock. The sb pointer is captured before iput() because
 * iput() may free the inode.
 */
static inline void nfs_iput_and_deactive(struct inode *inode)
{
if (inode != NULL) {
struct super_block *sb = inode->i_sb;
iput(inode);
nfs_sb_deactive(sb);
}
}
/*
 * Determine the device name as a string
 */
static inline char *nfs_devname(struct dentry *dentry,
char *buffer, ssize_t buflen)
{
/* The canonical path is returned; the prefix pointer is discarded. */
char *dummy;
return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL);
}
/*
* Determine the actual block size (and log2 thereof)
*/
/*
 * Round @bsize down to the largest power of two it contains (at most
 * 2^31) and optionally report log2 of the result via @nrbitsp.
 */
static inline
unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
{
/* make sure blocksize is a power of two */
if ((bsize & (bsize - 1)) || nrbitsp) {
unsigned char nrbits;
/* scan down from bit 31 for the highest set bit */
for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
;
bsize = 1 << nrbits;
if (nrbitsp)
*nrbitsp = nrbits;
}
return bsize;
}
/*
 * Calculate the number of 512byte blocks used.
 */
static inline blkcnt_t nfs_calc_block_size(u64 tsize)
{
/* round up, then clamp to what blkcnt_t callers can represent */
blkcnt_t used = (tsize + 511) >> 9;
return (used > ULONG_MAX) ? ULONG_MAX : used;
}
/*
 * Compute and set NFS server blocksize
 */
static inline
unsigned long nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
{
/* clamp into [NFS_MIN_FILE_IO_SIZE, NFS_MAX_FILE_IO_SIZE) before
 * rounding to a power of two; out-of-range low values fall back to
 * the default I/O size rather than the minimum */
if (bsize < NFS_MIN_FILE_IO_SIZE)
bsize = NFS_DEF_FILE_IO_SIZE;
else if (bsize >= NFS_MAX_FILE_IO_SIZE)
bsize = NFS_MAX_FILE_IO_SIZE;
return nfs_block_bits(bsize, nrbitsp);
}
/*
 * Determine the maximum file size for a superblock
 */
static inline
void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
{
sb->s_maxbytes = (loff_t)maxfilesize;
/* non-positive or oversized values are clamped to the VFS limit */
if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0)
sb->s_maxbytes = MAX_LFS_FILESIZE;
}
/*
 * Record the page as unstable and mark its inode as dirty.
 */
static inline
void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
{
/* direct I/O (cinfo->dreq set) is accounted elsewhere --
 * presumably in direct.c; confirm */
if (!cinfo->dreq) {
struct inode *inode = page_file_mapping(page)->host;
inc_node_page_state(page, NR_UNSTABLE_NFS);
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
}
/*
 * Determine the number of bytes of data the page contains
 */
static inline
unsigned int nfs_page_length(struct page *page)
{
loff_t i_size = i_size_read(page_file_mapping(page)->host);
if (i_size > 0) {
pgoff_t index = page_index(page);
pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
/* full page when strictly before the last page of the file */
if (index < end_index)
return PAGE_SIZE;
/* last page: only the bytes up to i_size are valid */
if (index == end_index)
return ((i_size - 1) & ~PAGE_MASK) + 1;
}
/* beyond EOF (or empty file): no valid data */
return 0;
}
/*
 * Convert a umode to a dirent->d_type
 */
static inline
unsigned char nfs_umode_to_dtype(umode_t mode)
{
/* the file-type bits of the mode, shifted into the low nibble */
return (mode >> 12) & 15;
}
/*
 * Determine the number of pages in an array of length 'len' and
 * with a base offset of 'base'
 */
static inline
unsigned int nfs_page_array_len(unsigned int base, size_t len)
{
return ((unsigned long)len + (unsigned long)base +
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/*
 * Convert a struct timespec into a 64-bit change attribute
 *
 * This does approximately the same thing as timespec_to_ns(),
 * but for calculation efficiency, we multiply the seconds by
 * 1024*1024*1024.
 */
static inline
u64 nfs_timespec_to_change_attr(const struct timespec *ts)
{
return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
}
#ifdef CONFIG_CRC32
/**
 * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
 * @fh - pointer to filehandle
 *
 * returns a crc32 hash for the filehandle that is compatible with
 * the one displayed by "wireshark".
 */
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
}
/*
 * nfs_stateid_hash - crc32 hash over the "other" field of a stateid,
 * computed the same way as nfs_fhandle_hash().
 */
static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
{
return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
NFS4_STATEID_OTHER_SIZE);
}
#else
/* Without CONFIG_CRC32 the hashes degrade to a constant 0. */
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return 0;
}
static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
{
return 0;
}
#endif
/*
 * Classify @err (a negative errno) as fatal. Fatal errors are reported
 * to the caller rather than retried -- presumably checked by the
 * writeback paths; confirm at the call sites.
 */
static inline bool nfs_error_is_fatal(int err)
{
switch (err) {
case -ERESTARTSYS:
case -EACCES:
case -EDQUOT:
case -EFBIG:
case -EIO:
case -ENOSPC:
case -EROFS:
case -ESTALE:
case -E2BIG:
return true;
default:
return false;
}
}
/*
 * Record a write error on an open context. The write barrier orders
 * the store to ctx->error before setting the flag bit, so a reader
 * that observes NFS_CONTEXT_ERROR_WRITE also sees the error value
 * (pairs with a read barrier on the consumer side -- presumably).
 */
static inline void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
ctx->error = error;
smp_wmb();
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
| {
"pile_set_name": "Github"
} |
<?php
/*
|--------------------------------------------------------------------------
| Web Routes
|--------------------------------------------------------------------------
|
| Here is where you can register web routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| contains the "web" middleware group. Now create something great!
|
*/
// Landing page.
Route::get('/', 'ApplicationController@index');
// Display a single user by id.
Route::get('/user/{id}', 'UserController@show');
// Create a new user.
Route::post('/user', 'UserController@create');
| {
"pile_set_name": "Github"
} |
#ifndef SASS_ERROR_HANDLING_H
#define SASS_ERROR_HANDLING_H
#include <string>
#include <sstream>
#include <stdexcept>
#include "position.hpp"
#include "backtrace.hpp"
#include "ast_fwd_decl.hpp"
#include "sass/functions.h"
namespace Sass {
struct Backtrace;
namespace Exception {
// Default messages used when an exception is raised without a more
// specific description.
// Note: fixed typo in the user-visible message ("neested" -> "nested").
const std::string def_msg = "Invalid sass detected";
const std::string def_op_msg = "Undefined operation";
const std::string def_op_null_msg = "Invalid null operation";
const std::string def_nesting_limit = "Code too deeply nested";
// Root of the Sass exception hierarchy: carries the parser position
// (pstate) and the call backtraces collected at throw time. The
// formatted message is owned here so what() can return a stable
// c_str() pointer.
class Base : public std::runtime_error {
protected:
std::string msg;
std::string prefix;	// error-type label reported by errtype()
public:
ParserState pstate;
Backtraces traces;
public:
Base(ParserState pstate, std::string msg, Backtraces traces);
virtual const char* errtype() const { return prefix.c_str(); }
virtual const char* what() const throw() { return msg.c_str(); }
virtual ~Base() throw() {};
};
// Generic "invalid sass" error with a caller-supplied message.
class InvalidSass : public Base {
public:
InvalidSass(ParserState pstate, Backtraces traces, std::string msg);
virtual ~InvalidSass() throw() {};
};
// Raised when a selector references an invalid parent selector.
class InvalidParent : public Base {
protected:
Selector_Ptr parent;
Selector_Ptr selector;
public:
InvalidParent(Selector_Ptr parent, Backtraces traces, Selector_Ptr selector);
virtual ~InvalidParent() throw() {};
};
// A required argument @arg was not passed to function @fn.
class MissingArgument : public Base {
protected:
std::string fn;
std::string arg;
std::string fntype;
public:
MissingArgument(ParserState pstate, Backtraces traces, std::string fn, std::string arg, std::string fntype);
virtual ~MissingArgument() throw() {};
};
// Argument @arg of function @fn had the wrong type; @value optionally
// carries the offending value (may be null).
class InvalidArgumentType : public Base {
protected:
std::string fn;
std::string arg;
std::string type;
const Value_Ptr value;
public:
InvalidArgumentType(ParserState pstate, Backtraces traces, std::string fn, std::string arg, std::string type, const Value_Ptr value = 0);
virtual ~InvalidArgumentType() throw() {};
};
// A variable keyword argument had an invalid type.
class InvalidVarKwdType : public Base {
protected:
std::string name;
const Argument_Ptr arg;
public:
InvalidVarKwdType(ParserState pstate, Backtraces traces, std::string name, const Argument_Ptr arg = 0);
virtual ~InvalidVarKwdType() throw() {};
};
// Parse-time syntax error with a caller-supplied message.
class InvalidSyntax : public Base {
public:
InvalidSyntax(ParserState pstate, Backtraces traces, std::string msg);
virtual ~InvalidSyntax() throw() {};
};
// Raised when nesting exceeds the evaluator's depth limit; defaults to
// the def_nesting_limit message.
class NestingLimitError : public Base {
public:
NestingLimitError(ParserState pstate, Backtraces traces, std::string msg = def_nesting_limit);
virtual ~NestingLimitError() throw() {};
};
// A map literal contains a duplicate key; @dup is the map, @org the
// original (first) occurrence of the key expression.
class DuplicateKeyError : public Base {
protected:
const Map& dup;
const Expression& org;
public:
DuplicateKeyError(Backtraces traces, const Map& dup, const Expression& org);
virtual const char* errtype() const { return "Error"; }
virtual ~DuplicateKeyError() throw() {};
};
// Expression @var did not have the expected @type.
class TypeMismatch : public Base {
protected:
const Expression& var;
const std::string type;
public:
TypeMismatch(Backtraces traces, const Expression& var, const std::string type);
virtual const char* errtype() const { return "Error"; }
virtual ~TypeMismatch() throw() {};
};
// Expression @val evaluated to an invalid value.
class InvalidValue : public Base {
protected:
const Expression& val;
public:
InvalidValue(Backtraces traces, const Expression& val);
virtual const char* errtype() const { return "Error"; }
virtual ~InvalidValue() throw() {};
};
// Evaluation recursed too deeply while processing @node; reported as a
// SystemStackError to mirror Ruby Sass behaviour -- presumably; verify.
class StackError : public Base {
protected:
const AST_Node& node;
public:
StackError(Backtraces traces, const AST_Node& node);
virtual const char* errtype() const { return "SystemStackError"; }
virtual ~StackError() throw() {};
};
/* common virtual base class (has no pstate or trace) */
// Base for operator-evaluation errors. Unlike Exception::Base it has
// no pstate/traces: these are thrown from low-level value operations
// and later wrapped (see SassValueError below) once position
// information is available.
class OperationError : public std::runtime_error {
protected:
std::string msg;
public:
OperationError(std::string msg = def_op_msg)
: std::runtime_error(msg), msg(msg)
{};
public:
virtual const char* errtype() const { return "Error"; }
virtual const char* what() const throw() { return msg.c_str(); }
virtual ~OperationError() throw() {};
};
// Division where @rhs evaluated to zero.
class ZeroDivisionError : public OperationError {
protected:
const Expression& lhs;
const Expression& rhs;
public:
ZeroDivisionError(const Expression& lhs, const Expression& rhs);
virtual const char* errtype() const { return "ZeroDivisionError"; }
virtual ~ZeroDivisionError() throw() {};
};
// Arithmetic between operands whose units cannot be reconciled.
class IncompatibleUnits : public OperationError {
protected:
// const Sass::UnitType lhs;
// const Sass::UnitType rhs;
public:
IncompatibleUnits(const Units& lhs, const Units& rhs);
IncompatibleUnits(const UnitType lhs, const UnitType rhs);
virtual ~IncompatibleUnits() throw() {};
};
// Operator @op is not defined for the given operand types.
class UndefinedOperation : public OperationError {
protected:
Expression_Ptr_Const lhs;
Expression_Ptr_Const rhs;
const Sass_OP op;
public:
UndefinedOperation(Expression_Ptr_Const lhs, Expression_Ptr_Const rhs, enum Sass_OP op);
// virtual const char* errtype() const { return "Error"; }
virtual ~UndefinedOperation() throw() {};
};
// Specialization of UndefinedOperation for operations involving null.
class InvalidNullOperation : public UndefinedOperation {
public:
InvalidNullOperation(Expression_Ptr_Const lhs, Expression_Ptr_Const rhs, enum Sass_OP op);
virtual ~InvalidNullOperation() throw() {};
};
// Color arithmetic where the two colors' alpha channels differ.
class AlphaChannelsNotEqual : public OperationError {
protected:
Expression_Ptr_Const lhs;
Expression_Ptr_Const rhs;
const Sass_OP op;
public:
AlphaChannelsNotEqual(Expression_Ptr_Const lhs, Expression_Ptr_Const rhs, enum Sass_OP op);
// virtual const char* errtype() const { return "Error"; }
virtual ~AlphaChannelsNotEqual() throw() {};
};
// Wraps an OperationError with position/backtrace info so it can be
// reported like any other Exception::Base.
class SassValueError : public Base {
public:
SassValueError(Backtraces traces, ParserState pstate, OperationError& err);
virtual ~SassValueError() throw() {};
};
}
// Warning diagnostics reported at position @pstate (optionally with a
// backtrace).
void warn(std::string msg, ParserState pstate);
void warn(std::string msg, ParserState pstate, Backtrace* bt);
void warning(std::string msg, ParserState pstate);
// Deprecation notices for specific legacy behaviours.
void deprecated_function(std::string msg, ParserState pstate);
void deprecated(std::string msg, std::string msg2, bool with_column, ParserState pstate);
void deprecated_bind(std::string msg, ParserState pstate);
// void deprecated(std::string msg, ParserState pstate, Backtrace* bt);
// Fatal error reporting -- presumably these throw; confirm in the
// implementation file.
void coreError(std::string msg, ParserState pstate);
void error(std::string msg, ParserState pstate, Backtraces& traces);
}
#endif
| {
"pile_set_name": "Github"
} |
# created by tools/loadICU.tcl -- do not edit
namespace eval ::tcl::clock {
::msgcat::mcset bn DAYS_OF_WEEK_ABBREV [list \
"\u09b0\u09ac\u09bf"\
"\u09b8\u09cb\u09ae"\
"\u09ae\u0999\u0997\u09b2"\
"\u09ac\u09c1\u09a7"\
"\u09ac\u09c3\u09b9\u09b8\u09cd\u09aa\u09a4\u09bf"\
"\u09b6\u09c1\u0995\u09cd\u09b0"\
"\u09b6\u09a8\u09bf"]
::msgcat::mcset bn DAYS_OF_WEEK_FULL [list \
"\u09b0\u09ac\u09bf\u09ac\u09be\u09b0"\
"\u09b8\u09cb\u09ae\u09ac\u09be\u09b0"\
"\u09ae\u0999\u0997\u09b2\u09ac\u09be\u09b0"\
"\u09ac\u09c1\u09a7\u09ac\u09be\u09b0"\
"\u09ac\u09c3\u09b9\u09b8\u09cd\u09aa\u09a4\u09bf\u09ac\u09be\u09b0"\
"\u09b6\u09c1\u0995\u09cd\u09b0\u09ac\u09be\u09b0"\
"\u09b6\u09a8\u09bf\u09ac\u09be\u09b0"]
::msgcat::mcset bn MONTHS_ABBREV [list \
"\u099c\u09be\u09a8\u09c1\u09df\u09be\u09b0\u09c0"\
"\u09ab\u09c7\u09ac\u09cd\u09b0\u09c1\u09df\u09be\u09b0\u09c0"\
"\u09ae\u09be\u09b0\u09cd\u099a"\
"\u098f\u09aa\u09cd\u09b0\u09bf\u09b2"\
"\u09ae\u09c7"\
"\u099c\u09c1\u09a8"\
"\u099c\u09c1\u09b2\u09be\u0987"\
"\u0986\u0997\u09b8\u09cd\u099f"\
"\u09b8\u09c7\u09aa\u09cd\u099f\u09c7\u09ae\u09cd\u09ac\u09b0"\
"\u0985\u0995\u09cd\u099f\u09cb\u09ac\u09b0"\
"\u09a8\u09ad\u09c7\u09ae\u09cd\u09ac\u09b0"\
"\u09a1\u09bf\u09b8\u09c7\u09ae\u09cd\u09ac\u09b0"\
""]
::msgcat::mcset bn MONTHS_FULL [list \
"\u099c\u09be\u09a8\u09c1\u09df\u09be\u09b0\u09c0"\
"\u09ab\u09c7\u09ac\u09cd\u09b0\u09c1\u09df\u09be\u09b0\u09c0"\
"\u09ae\u09be\u09b0\u09cd\u099a"\
"\u098f\u09aa\u09cd\u09b0\u09bf\u09b2"\
"\u09ae\u09c7"\
"\u099c\u09c1\u09a8"\
"\u099c\u09c1\u09b2\u09be\u0987"\
"\u0986\u0997\u09b8\u09cd\u099f"\
"\u09b8\u09c7\u09aa\u09cd\u099f\u09c7\u09ae\u09cd\u09ac\u09b0"\
"\u0985\u0995\u09cd\u099f\u09cb\u09ac\u09b0"\
"\u09a8\u09ad\u09c7\u09ae\u09cd\u09ac\u09b0"\
"\u09a1\u09bf\u09b8\u09c7\u09ae\u09cd\u09ac\u09b0"\
""]
::msgcat::mcset bn AM "\u09aa\u09c2\u09b0\u09cd\u09ac\u09be\u09b9\u09cd\u09a3"
::msgcat::mcset bn PM "\u0985\u09aa\u09b0\u09be\u09b9\u09cd\u09a3"
}
| {
"pile_set_name": "Github"
} |
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = timeLimit;
var _mapLimit = require('./mapLimit');
var _mapLimit2 = _interopRequireDefault(_mapLimit);
var _baseRange = require('lodash/_baseRange');
var _baseRange2 = _interopRequireDefault(_baseRange);
var _wrapAsync = require('./internal/wrapAsync');
var _wrapAsync2 = _interopRequireDefault(_wrapAsync);
// Normalize a required module for ES-module interop: objects marked
// with `__esModule` (transpiled ES modules) are returned unchanged,
// while plain CommonJS exports are wrapped so the export is always
// reachable under `.default`.
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
/**
* The same as [times]{@link module:ControlFlow.times} but runs a maximum of `limit` async operations at a
* time.
*
* @name timesLimit
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.times]{@link module:ControlFlow.times}
* @category Control Flow
* @param {number} count - The number of times to run the function.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - The async function to call `n` times.
* Invoked with the iteration index and a callback: (n, next).
* @param {Function} callback - see [async.map]{@link module:Collections.map}.
*/
// Run `iteratee` `count` times with at most `limit` concurrent calls,
// collecting results via mapLimit over the index range [0, count).
// NOTE(review): the internal name is `timeLimit` but the documented
// public alias is `timesLimit` (see the @name tag above); kept as-is
// since this is compiled output.
function timeLimit(count, limit, iteratee, callback) {
// wrapAsync lets the iteratee be either callback-style or async/Promise
var _iteratee = (0, _wrapAsync2.default)(iteratee);
(0, _mapLimit2.default)((0, _baseRange2.default)(0, count, 1), limit, _iteratee, callback);
}
module.exports = exports['default']; | {
"pile_set_name": "Github"
} |
---
title: 在条件编译表达式中不允许有非内部的类型名称
ms.date: 07/20/2015
f1_keywords:
- bc31426
- vbc31426
helpviewer_keywords:
- BC31426
ms.assetid: 73deeafa-4dbe-4c27-aff7-c6bea0e37d5a
ms.openlocfilehash: 58b945fcd7b7efc42f6e6ef93fb4ccd73ffabc38
ms.sourcegitcommit: bf5c5850654187705bc94cc40ebfb62fe346ab02
ms.translationtype: MT
ms.contentlocale: zh-CN
ms.lasthandoff: 09/23/2020
ms.locfileid: "91062819"
---
# <a name="non-intrinsic-type-names-are-not-allowed-in-conditional-compilation-expressions"></a>在条件编译表达式中不允许有非内部的类型名称
条件编译表达式引用非内部类型,但仅允许使用内部类型。
有关 Visual Basic 中的内部类型的详细信息,请参阅 [数据类型摘要](../language-reference/keywords/data-types-summary.md)。
**错误 ID:** BC31426
## <a name="to-correct-this-error"></a>更正此错误
- 将非内部类型替换为内部类型。
## <a name="see-also"></a>请参阅
- [Visual Basic 中的条件编译](../programming-guide/program-structure/conditional-compilation.md)
- [数据类型摘要](../language-reference/keywords/data-types-summary.md)
| {
"pile_set_name": "Github"
} |
# /* **************************************************************************
# * *
# * (C) Copyright Paul Mensonides 2002.
# * Distributed under the Boost Software License, Version 1.0. (See
# * accompanying file LICENSE_1_0.txt or copy at
# * http://www.boost.org/LICENSE_1_0.txt)
# * *
# ************************************************************************** */
#
# /* See http://www.boost.org for most recent version. */
#
# /* Decode the depth-1 iteration request.  The user supplies either
#  * BOOST_PP_ITERATION_LIMITS (a 2-tuple of bounds, with the filename given
#  * separately via BOOST_PP_FILENAME_1) or BOOST_PP_ITERATION_PARAMS_1 (an
#  * array bundling lower bound, upper bound, filename, and optional flags).
#  */
# if defined(BOOST_PP_ITERATION_LIMITS)
#    if !defined(BOOST_PP_FILENAME_1)
#        error BOOST_PP_ERROR: depth #1 filename is not defined
#    endif
# /* lower1.hpp/upper1.hpp evaluate BOOST_PP_VALUE and define
#  * BOOST_PP_ITERATION_START_1 / BOOST_PP_ITERATION_FINISH_1 respectively. */
#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 0, BOOST_PP_ITERATION_LIMITS)
#    include <boost/preprocessor/iteration/detail/bounds/lower1.hpp>
#    define BOOST_PP_VALUE BOOST_PP_TUPLE_ELEM(2, 1, BOOST_PP_ITERATION_LIMITS)
#    include <boost/preprocessor/iteration/detail/bounds/upper1.hpp>
#    define BOOST_PP_ITERATION_FLAGS_1() 0
#    undef BOOST_PP_ITERATION_LIMITS
# elif defined(BOOST_PP_ITERATION_PARAMS_1)
#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(0, BOOST_PP_ITERATION_PARAMS_1)
#    include <boost/preprocessor/iteration/detail/bounds/lower1.hpp>
#    define BOOST_PP_VALUE BOOST_PP_ARRAY_ELEM(1, BOOST_PP_ITERATION_PARAMS_1)
#    include <boost/preprocessor/iteration/detail/bounds/upper1.hpp>
#    define BOOST_PP_FILENAME_1 BOOST_PP_ARRAY_ELEM(2, BOOST_PP_ITERATION_PARAMS_1)
# /* The 4th array element (iteration flags) is optional; default to 0. */
#    if BOOST_PP_ARRAY_SIZE(BOOST_PP_ITERATION_PARAMS_1) >= 4
#        define BOOST_PP_ITERATION_FLAGS_1() BOOST_PP_ARRAY_ELEM(3, BOOST_PP_ITERATION_PARAMS_1)
#    else
#        define BOOST_PP_ITERATION_FLAGS_1() 0
#    endif
# else
#    error BOOST_PP_ERROR: depth #1 iteration boundaries or filename not defined
# endif
#
# /* Mark that a depth-1 file-iteration is now in progress. */
# undef BOOST_PP_ITERATION_DEPTH
# define BOOST_PP_ITERATION_DEPTH() 1
#
# define BOOST_PP_IS_ITERATING 1
#
# if (BOOST_PP_ITERATION_START_1) > (BOOST_PP_ITERATION_FINISH_1)
# include <boost/preprocessor/iteration/detail/iter/reverse1.hpp>
# else
# if BOOST_PP_ITERATION_START_1 <= 0 && BOOST_PP_ITERATION_FINISH_1 >= 0
# define BOOST_PP_ITERATION_1 0
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 1 && BOOST_PP_ITERATION_FINISH_1 >= 1
# define BOOST_PP_ITERATION_1 1
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 2 && BOOST_PP_ITERATION_FINISH_1 >= 2
# define BOOST_PP_ITERATION_1 2
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 3 && BOOST_PP_ITERATION_FINISH_1 >= 3
# define BOOST_PP_ITERATION_1 3
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 4 && BOOST_PP_ITERATION_FINISH_1 >= 4
# define BOOST_PP_ITERATION_1 4
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 5 && BOOST_PP_ITERATION_FINISH_1 >= 5
# define BOOST_PP_ITERATION_1 5
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 6 && BOOST_PP_ITERATION_FINISH_1 >= 6
# define BOOST_PP_ITERATION_1 6
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 7 && BOOST_PP_ITERATION_FINISH_1 >= 7
# define BOOST_PP_ITERATION_1 7
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 8 && BOOST_PP_ITERATION_FINISH_1 >= 8
# define BOOST_PP_ITERATION_1 8
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 9 && BOOST_PP_ITERATION_FINISH_1 >= 9
# define BOOST_PP_ITERATION_1 9
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 10 && BOOST_PP_ITERATION_FINISH_1 >= 10
# define BOOST_PP_ITERATION_1 10
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 11 && BOOST_PP_ITERATION_FINISH_1 >= 11
# define BOOST_PP_ITERATION_1 11
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 12 && BOOST_PP_ITERATION_FINISH_1 >= 12
# define BOOST_PP_ITERATION_1 12
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 13 && BOOST_PP_ITERATION_FINISH_1 >= 13
# define BOOST_PP_ITERATION_1 13
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 14 && BOOST_PP_ITERATION_FINISH_1 >= 14
# define BOOST_PP_ITERATION_1 14
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 15 && BOOST_PP_ITERATION_FINISH_1 >= 15
# define BOOST_PP_ITERATION_1 15
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 16 && BOOST_PP_ITERATION_FINISH_1 >= 16
# define BOOST_PP_ITERATION_1 16
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 17 && BOOST_PP_ITERATION_FINISH_1 >= 17
# define BOOST_PP_ITERATION_1 17
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 18 && BOOST_PP_ITERATION_FINISH_1 >= 18
# define BOOST_PP_ITERATION_1 18
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 19 && BOOST_PP_ITERATION_FINISH_1 >= 19
# define BOOST_PP_ITERATION_1 19
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 20 && BOOST_PP_ITERATION_FINISH_1 >= 20
# define BOOST_PP_ITERATION_1 20
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 21 && BOOST_PP_ITERATION_FINISH_1 >= 21
# define BOOST_PP_ITERATION_1 21
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 22 && BOOST_PP_ITERATION_FINISH_1 >= 22
# define BOOST_PP_ITERATION_1 22
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 23 && BOOST_PP_ITERATION_FINISH_1 >= 23
# define BOOST_PP_ITERATION_1 23
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 24 && BOOST_PP_ITERATION_FINISH_1 >= 24
# define BOOST_PP_ITERATION_1 24
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 25 && BOOST_PP_ITERATION_FINISH_1 >= 25
# define BOOST_PP_ITERATION_1 25
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 26 && BOOST_PP_ITERATION_FINISH_1 >= 26
# define BOOST_PP_ITERATION_1 26
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 27 && BOOST_PP_ITERATION_FINISH_1 >= 27
# define BOOST_PP_ITERATION_1 27
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 28 && BOOST_PP_ITERATION_FINISH_1 >= 28
# define BOOST_PP_ITERATION_1 28
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 29 && BOOST_PP_ITERATION_FINISH_1 >= 29
# define BOOST_PP_ITERATION_1 29
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 30 && BOOST_PP_ITERATION_FINISH_1 >= 30
# define BOOST_PP_ITERATION_1 30
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 31 && BOOST_PP_ITERATION_FINISH_1 >= 31
# define BOOST_PP_ITERATION_1 31
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 32 && BOOST_PP_ITERATION_FINISH_1 >= 32
# define BOOST_PP_ITERATION_1 32
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 33 && BOOST_PP_ITERATION_FINISH_1 >= 33
# define BOOST_PP_ITERATION_1 33
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 34 && BOOST_PP_ITERATION_FINISH_1 >= 34
# define BOOST_PP_ITERATION_1 34
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 35 && BOOST_PP_ITERATION_FINISH_1 >= 35
# define BOOST_PP_ITERATION_1 35
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 36 && BOOST_PP_ITERATION_FINISH_1 >= 36
# define BOOST_PP_ITERATION_1 36
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 37 && BOOST_PP_ITERATION_FINISH_1 >= 37
# define BOOST_PP_ITERATION_1 37
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 38 && BOOST_PP_ITERATION_FINISH_1 >= 38
# define BOOST_PP_ITERATION_1 38
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 39 && BOOST_PP_ITERATION_FINISH_1 >= 39
# define BOOST_PP_ITERATION_1 39
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 40 && BOOST_PP_ITERATION_FINISH_1 >= 40
# define BOOST_PP_ITERATION_1 40
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 41 && BOOST_PP_ITERATION_FINISH_1 >= 41
# define BOOST_PP_ITERATION_1 41
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 42 && BOOST_PP_ITERATION_FINISH_1 >= 42
# define BOOST_PP_ITERATION_1 42
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 43 && BOOST_PP_ITERATION_FINISH_1 >= 43
# define BOOST_PP_ITERATION_1 43
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 44 && BOOST_PP_ITERATION_FINISH_1 >= 44
# define BOOST_PP_ITERATION_1 44
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 45 && BOOST_PP_ITERATION_FINISH_1 >= 45
# define BOOST_PP_ITERATION_1 45
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 46 && BOOST_PP_ITERATION_FINISH_1 >= 46
# define BOOST_PP_ITERATION_1 46
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 47 && BOOST_PP_ITERATION_FINISH_1 >= 47
# define BOOST_PP_ITERATION_1 47
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 48 && BOOST_PP_ITERATION_FINISH_1 >= 48
# define BOOST_PP_ITERATION_1 48
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 49 && BOOST_PP_ITERATION_FINISH_1 >= 49
# define BOOST_PP_ITERATION_1 49
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 50 && BOOST_PP_ITERATION_FINISH_1 >= 50
# define BOOST_PP_ITERATION_1 50
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 51 && BOOST_PP_ITERATION_FINISH_1 >= 51
# define BOOST_PP_ITERATION_1 51
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 52 && BOOST_PP_ITERATION_FINISH_1 >= 52
# define BOOST_PP_ITERATION_1 52
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 53 && BOOST_PP_ITERATION_FINISH_1 >= 53
# define BOOST_PP_ITERATION_1 53
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 54 && BOOST_PP_ITERATION_FINISH_1 >= 54
# define BOOST_PP_ITERATION_1 54
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 55 && BOOST_PP_ITERATION_FINISH_1 >= 55
# define BOOST_PP_ITERATION_1 55
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 56 && BOOST_PP_ITERATION_FINISH_1 >= 56
# define BOOST_PP_ITERATION_1 56
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 57 && BOOST_PP_ITERATION_FINISH_1 >= 57
# define BOOST_PP_ITERATION_1 57
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 58 && BOOST_PP_ITERATION_FINISH_1 >= 58
# define BOOST_PP_ITERATION_1 58
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 59 && BOOST_PP_ITERATION_FINISH_1 >= 59
# define BOOST_PP_ITERATION_1 59
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 60 && BOOST_PP_ITERATION_FINISH_1 >= 60
# define BOOST_PP_ITERATION_1 60
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 61 && BOOST_PP_ITERATION_FINISH_1 >= 61
# define BOOST_PP_ITERATION_1 61
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 62 && BOOST_PP_ITERATION_FINISH_1 >= 62
# define BOOST_PP_ITERATION_1 62
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 63 && BOOST_PP_ITERATION_FINISH_1 >= 63
# define BOOST_PP_ITERATION_1 63
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 64 && BOOST_PP_ITERATION_FINISH_1 >= 64
# define BOOST_PP_ITERATION_1 64
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 65 && BOOST_PP_ITERATION_FINISH_1 >= 65
# define BOOST_PP_ITERATION_1 65
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 66 && BOOST_PP_ITERATION_FINISH_1 >= 66
# define BOOST_PP_ITERATION_1 66
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 67 && BOOST_PP_ITERATION_FINISH_1 >= 67
# define BOOST_PP_ITERATION_1 67
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 68 && BOOST_PP_ITERATION_FINISH_1 >= 68
# define BOOST_PP_ITERATION_1 68
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 69 && BOOST_PP_ITERATION_FINISH_1 >= 69
# define BOOST_PP_ITERATION_1 69
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 70 && BOOST_PP_ITERATION_FINISH_1 >= 70
# define BOOST_PP_ITERATION_1 70
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 71 && BOOST_PP_ITERATION_FINISH_1 >= 71
# define BOOST_PP_ITERATION_1 71
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 72 && BOOST_PP_ITERATION_FINISH_1 >= 72
# define BOOST_PP_ITERATION_1 72
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 73 && BOOST_PP_ITERATION_FINISH_1 >= 73
# define BOOST_PP_ITERATION_1 73
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 74 && BOOST_PP_ITERATION_FINISH_1 >= 74
# define BOOST_PP_ITERATION_1 74
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 75 && BOOST_PP_ITERATION_FINISH_1 >= 75
# define BOOST_PP_ITERATION_1 75
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 76 && BOOST_PP_ITERATION_FINISH_1 >= 76
# define BOOST_PP_ITERATION_1 76
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 77 && BOOST_PP_ITERATION_FINISH_1 >= 77
# define BOOST_PP_ITERATION_1 77
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 78 && BOOST_PP_ITERATION_FINISH_1 >= 78
# define BOOST_PP_ITERATION_1 78
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 79 && BOOST_PP_ITERATION_FINISH_1 >= 79
# define BOOST_PP_ITERATION_1 79
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 80 && BOOST_PP_ITERATION_FINISH_1 >= 80
# define BOOST_PP_ITERATION_1 80
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 81 && BOOST_PP_ITERATION_FINISH_1 >= 81
# define BOOST_PP_ITERATION_1 81
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 82 && BOOST_PP_ITERATION_FINISH_1 >= 82
# define BOOST_PP_ITERATION_1 82
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 83 && BOOST_PP_ITERATION_FINISH_1 >= 83
# define BOOST_PP_ITERATION_1 83
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 84 && BOOST_PP_ITERATION_FINISH_1 >= 84
# define BOOST_PP_ITERATION_1 84
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 85 && BOOST_PP_ITERATION_FINISH_1 >= 85
# define BOOST_PP_ITERATION_1 85
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 86 && BOOST_PP_ITERATION_FINISH_1 >= 86
# define BOOST_PP_ITERATION_1 86
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 87 && BOOST_PP_ITERATION_FINISH_1 >= 87
# define BOOST_PP_ITERATION_1 87
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 88 && BOOST_PP_ITERATION_FINISH_1 >= 88
# define BOOST_PP_ITERATION_1 88
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 89 && BOOST_PP_ITERATION_FINISH_1 >= 89
# define BOOST_PP_ITERATION_1 89
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 90 && BOOST_PP_ITERATION_FINISH_1 >= 90
# define BOOST_PP_ITERATION_1 90
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 91 && BOOST_PP_ITERATION_FINISH_1 >= 91
# define BOOST_PP_ITERATION_1 91
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 92 && BOOST_PP_ITERATION_FINISH_1 >= 92
# define BOOST_PP_ITERATION_1 92
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 93 && BOOST_PP_ITERATION_FINISH_1 >= 93
# define BOOST_PP_ITERATION_1 93
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 94 && BOOST_PP_ITERATION_FINISH_1 >= 94
# define BOOST_PP_ITERATION_1 94
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 95 && BOOST_PP_ITERATION_FINISH_1 >= 95
# define BOOST_PP_ITERATION_1 95
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 96 && BOOST_PP_ITERATION_FINISH_1 >= 96
# define BOOST_PP_ITERATION_1 96
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 97 && BOOST_PP_ITERATION_FINISH_1 >= 97
# define BOOST_PP_ITERATION_1 97
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 98 && BOOST_PP_ITERATION_FINISH_1 >= 98
# define BOOST_PP_ITERATION_1 98
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 99 && BOOST_PP_ITERATION_FINISH_1 >= 99
# define BOOST_PP_ITERATION_1 99
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 100 && BOOST_PP_ITERATION_FINISH_1 >= 100
# define BOOST_PP_ITERATION_1 100
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 101 && BOOST_PP_ITERATION_FINISH_1 >= 101
# define BOOST_PP_ITERATION_1 101
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 102 && BOOST_PP_ITERATION_FINISH_1 >= 102
# define BOOST_PP_ITERATION_1 102
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 103 && BOOST_PP_ITERATION_FINISH_1 >= 103
# define BOOST_PP_ITERATION_1 103
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 104 && BOOST_PP_ITERATION_FINISH_1 >= 104
# define BOOST_PP_ITERATION_1 104
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 105 && BOOST_PP_ITERATION_FINISH_1 >= 105
# define BOOST_PP_ITERATION_1 105
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 106 && BOOST_PP_ITERATION_FINISH_1 >= 106
# define BOOST_PP_ITERATION_1 106
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 107 && BOOST_PP_ITERATION_FINISH_1 >= 107
# define BOOST_PP_ITERATION_1 107
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 108 && BOOST_PP_ITERATION_FINISH_1 >= 108
# define BOOST_PP_ITERATION_1 108
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 109 && BOOST_PP_ITERATION_FINISH_1 >= 109
# define BOOST_PP_ITERATION_1 109
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 110 && BOOST_PP_ITERATION_FINISH_1 >= 110
# define BOOST_PP_ITERATION_1 110
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 111 && BOOST_PP_ITERATION_FINISH_1 >= 111
# define BOOST_PP_ITERATION_1 111
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 112 && BOOST_PP_ITERATION_FINISH_1 >= 112
# define BOOST_PP_ITERATION_1 112
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 113 && BOOST_PP_ITERATION_FINISH_1 >= 113
# define BOOST_PP_ITERATION_1 113
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 114 && BOOST_PP_ITERATION_FINISH_1 >= 114
# define BOOST_PP_ITERATION_1 114
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 115 && BOOST_PP_ITERATION_FINISH_1 >= 115
# define BOOST_PP_ITERATION_1 115
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 116 && BOOST_PP_ITERATION_FINISH_1 >= 116
# define BOOST_PP_ITERATION_1 116
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 117 && BOOST_PP_ITERATION_FINISH_1 >= 117
# define BOOST_PP_ITERATION_1 117
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 118 && BOOST_PP_ITERATION_FINISH_1 >= 118
# define BOOST_PP_ITERATION_1 118
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 119 && BOOST_PP_ITERATION_FINISH_1 >= 119
# define BOOST_PP_ITERATION_1 119
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 120 && BOOST_PP_ITERATION_FINISH_1 >= 120
# define BOOST_PP_ITERATION_1 120
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 121 && BOOST_PP_ITERATION_FINISH_1 >= 121
# define BOOST_PP_ITERATION_1 121
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 122 && BOOST_PP_ITERATION_FINISH_1 >= 122
# define BOOST_PP_ITERATION_1 122
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 123 && BOOST_PP_ITERATION_FINISH_1 >= 123
# define BOOST_PP_ITERATION_1 123
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 124 && BOOST_PP_ITERATION_FINISH_1 >= 124
# define BOOST_PP_ITERATION_1 124
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 125 && BOOST_PP_ITERATION_FINISH_1 >= 125
# define BOOST_PP_ITERATION_1 125
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 126 && BOOST_PP_ITERATION_FINISH_1 >= 126
# define BOOST_PP_ITERATION_1 126
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 127 && BOOST_PP_ITERATION_FINISH_1 >= 127
# define BOOST_PP_ITERATION_1 127
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 128 && BOOST_PP_ITERATION_FINISH_1 >= 128
# define BOOST_PP_ITERATION_1 128
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 129 && BOOST_PP_ITERATION_FINISH_1 >= 129
# define BOOST_PP_ITERATION_1 129
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 130 && BOOST_PP_ITERATION_FINISH_1 >= 130
# define BOOST_PP_ITERATION_1 130
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 131 && BOOST_PP_ITERATION_FINISH_1 >= 131
# define BOOST_PP_ITERATION_1 131
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 132 && BOOST_PP_ITERATION_FINISH_1 >= 132
# define BOOST_PP_ITERATION_1 132
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 133 && BOOST_PP_ITERATION_FINISH_1 >= 133
# define BOOST_PP_ITERATION_1 133
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 134 && BOOST_PP_ITERATION_FINISH_1 >= 134
# define BOOST_PP_ITERATION_1 134
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 135 && BOOST_PP_ITERATION_FINISH_1 >= 135
# define BOOST_PP_ITERATION_1 135
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 136 && BOOST_PP_ITERATION_FINISH_1 >= 136
# define BOOST_PP_ITERATION_1 136
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 137 && BOOST_PP_ITERATION_FINISH_1 >= 137
# define BOOST_PP_ITERATION_1 137
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 138 && BOOST_PP_ITERATION_FINISH_1 >= 138
# define BOOST_PP_ITERATION_1 138
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 139 && BOOST_PP_ITERATION_FINISH_1 >= 139
# define BOOST_PP_ITERATION_1 139
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 140 && BOOST_PP_ITERATION_FINISH_1 >= 140
# define BOOST_PP_ITERATION_1 140
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 141 && BOOST_PP_ITERATION_FINISH_1 >= 141
# define BOOST_PP_ITERATION_1 141
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 142 && BOOST_PP_ITERATION_FINISH_1 >= 142
# define BOOST_PP_ITERATION_1 142
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 143 && BOOST_PP_ITERATION_FINISH_1 >= 143
# define BOOST_PP_ITERATION_1 143
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 144 && BOOST_PP_ITERATION_FINISH_1 >= 144
# define BOOST_PP_ITERATION_1 144
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 145 && BOOST_PP_ITERATION_FINISH_1 >= 145
# define BOOST_PP_ITERATION_1 145
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 146 && BOOST_PP_ITERATION_FINISH_1 >= 146
# define BOOST_PP_ITERATION_1 146
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 147 && BOOST_PP_ITERATION_FINISH_1 >= 147
# define BOOST_PP_ITERATION_1 147
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 148 && BOOST_PP_ITERATION_FINISH_1 >= 148
# define BOOST_PP_ITERATION_1 148
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 149 && BOOST_PP_ITERATION_FINISH_1 >= 149
# define BOOST_PP_ITERATION_1 149
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 150 && BOOST_PP_ITERATION_FINISH_1 >= 150
# define BOOST_PP_ITERATION_1 150
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 151 && BOOST_PP_ITERATION_FINISH_1 >= 151
# define BOOST_PP_ITERATION_1 151
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 152 && BOOST_PP_ITERATION_FINISH_1 >= 152
# define BOOST_PP_ITERATION_1 152
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 153 && BOOST_PP_ITERATION_FINISH_1 >= 153
# define BOOST_PP_ITERATION_1 153
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 154 && BOOST_PP_ITERATION_FINISH_1 >= 154
# define BOOST_PP_ITERATION_1 154
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 155 && BOOST_PP_ITERATION_FINISH_1 >= 155
# define BOOST_PP_ITERATION_1 155
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 156 && BOOST_PP_ITERATION_FINISH_1 >= 156
# define BOOST_PP_ITERATION_1 156
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 157 && BOOST_PP_ITERATION_FINISH_1 >= 157
# define BOOST_PP_ITERATION_1 157
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 158 && BOOST_PP_ITERATION_FINISH_1 >= 158
# define BOOST_PP_ITERATION_1 158
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 159 && BOOST_PP_ITERATION_FINISH_1 >= 159
# define BOOST_PP_ITERATION_1 159
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 160 && BOOST_PP_ITERATION_FINISH_1 >= 160
# define BOOST_PP_ITERATION_1 160
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 161 && BOOST_PP_ITERATION_FINISH_1 >= 161
# define BOOST_PP_ITERATION_1 161
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 162 && BOOST_PP_ITERATION_FINISH_1 >= 162
# define BOOST_PP_ITERATION_1 162
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 163 && BOOST_PP_ITERATION_FINISH_1 >= 163
# define BOOST_PP_ITERATION_1 163
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 164 && BOOST_PP_ITERATION_FINISH_1 >= 164
# define BOOST_PP_ITERATION_1 164
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 165 && BOOST_PP_ITERATION_FINISH_1 >= 165
# define BOOST_PP_ITERATION_1 165
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 166 && BOOST_PP_ITERATION_FINISH_1 >= 166
# define BOOST_PP_ITERATION_1 166
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 167 && BOOST_PP_ITERATION_FINISH_1 >= 167
# define BOOST_PP_ITERATION_1 167
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 168 && BOOST_PP_ITERATION_FINISH_1 >= 168
# define BOOST_PP_ITERATION_1 168
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 169 && BOOST_PP_ITERATION_FINISH_1 >= 169
# define BOOST_PP_ITERATION_1 169
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 170 && BOOST_PP_ITERATION_FINISH_1 >= 170
# define BOOST_PP_ITERATION_1 170
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 171 && BOOST_PP_ITERATION_FINISH_1 >= 171
# define BOOST_PP_ITERATION_1 171
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 172 && BOOST_PP_ITERATION_FINISH_1 >= 172
# define BOOST_PP_ITERATION_1 172
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 173 && BOOST_PP_ITERATION_FINISH_1 >= 173
# define BOOST_PP_ITERATION_1 173
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 174 && BOOST_PP_ITERATION_FINISH_1 >= 174
# define BOOST_PP_ITERATION_1 174
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 175 && BOOST_PP_ITERATION_FINISH_1 >= 175
# define BOOST_PP_ITERATION_1 175
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 176 && BOOST_PP_ITERATION_FINISH_1 >= 176
# define BOOST_PP_ITERATION_1 176
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 177 && BOOST_PP_ITERATION_FINISH_1 >= 177
# define BOOST_PP_ITERATION_1 177
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 178 && BOOST_PP_ITERATION_FINISH_1 >= 178
# define BOOST_PP_ITERATION_1 178
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 179 && BOOST_PP_ITERATION_FINISH_1 >= 179
# define BOOST_PP_ITERATION_1 179
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 180 && BOOST_PP_ITERATION_FINISH_1 >= 180
# define BOOST_PP_ITERATION_1 180
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 181 && BOOST_PP_ITERATION_FINISH_1 >= 181
# define BOOST_PP_ITERATION_1 181
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 182 && BOOST_PP_ITERATION_FINISH_1 >= 182
# define BOOST_PP_ITERATION_1 182
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 183 && BOOST_PP_ITERATION_FINISH_1 >= 183
# define BOOST_PP_ITERATION_1 183
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 184 && BOOST_PP_ITERATION_FINISH_1 >= 184
# define BOOST_PP_ITERATION_1 184
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 185 && BOOST_PP_ITERATION_FINISH_1 >= 185
# define BOOST_PP_ITERATION_1 185
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 186 && BOOST_PP_ITERATION_FINISH_1 >= 186
# define BOOST_PP_ITERATION_1 186
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 187 && BOOST_PP_ITERATION_FINISH_1 >= 187
# define BOOST_PP_ITERATION_1 187
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 188 && BOOST_PP_ITERATION_FINISH_1 >= 188
# define BOOST_PP_ITERATION_1 188
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 189 && BOOST_PP_ITERATION_FINISH_1 >= 189
# define BOOST_PP_ITERATION_1 189
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 190 && BOOST_PP_ITERATION_FINISH_1 >= 190
# define BOOST_PP_ITERATION_1 190
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 191 && BOOST_PP_ITERATION_FINISH_1 >= 191
# define BOOST_PP_ITERATION_1 191
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 192 && BOOST_PP_ITERATION_FINISH_1 >= 192
# define BOOST_PP_ITERATION_1 192
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 193 && BOOST_PP_ITERATION_FINISH_1 >= 193
# define BOOST_PP_ITERATION_1 193
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 194 && BOOST_PP_ITERATION_FINISH_1 >= 194
# define BOOST_PP_ITERATION_1 194
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 195 && BOOST_PP_ITERATION_FINISH_1 >= 195
# define BOOST_PP_ITERATION_1 195
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 196 && BOOST_PP_ITERATION_FINISH_1 >= 196
# define BOOST_PP_ITERATION_1 196
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 197 && BOOST_PP_ITERATION_FINISH_1 >= 197
# define BOOST_PP_ITERATION_1 197
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 198 && BOOST_PP_ITERATION_FINISH_1 >= 198
# define BOOST_PP_ITERATION_1 198
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 199 && BOOST_PP_ITERATION_FINISH_1 >= 199
# define BOOST_PP_ITERATION_1 199
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 200 && BOOST_PP_ITERATION_FINISH_1 >= 200
# define BOOST_PP_ITERATION_1 200
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 201 && BOOST_PP_ITERATION_FINISH_1 >= 201
# define BOOST_PP_ITERATION_1 201
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 202 && BOOST_PP_ITERATION_FINISH_1 >= 202
# define BOOST_PP_ITERATION_1 202
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 203 && BOOST_PP_ITERATION_FINISH_1 >= 203
# define BOOST_PP_ITERATION_1 203
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 204 && BOOST_PP_ITERATION_FINISH_1 >= 204
# define BOOST_PP_ITERATION_1 204
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 205 && BOOST_PP_ITERATION_FINISH_1 >= 205
# define BOOST_PP_ITERATION_1 205
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 206 && BOOST_PP_ITERATION_FINISH_1 >= 206
# define BOOST_PP_ITERATION_1 206
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 207 && BOOST_PP_ITERATION_FINISH_1 >= 207
# define BOOST_PP_ITERATION_1 207
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 208 && BOOST_PP_ITERATION_FINISH_1 >= 208
# define BOOST_PP_ITERATION_1 208
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 209 && BOOST_PP_ITERATION_FINISH_1 >= 209
# define BOOST_PP_ITERATION_1 209
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 210 && BOOST_PP_ITERATION_FINISH_1 >= 210
# define BOOST_PP_ITERATION_1 210
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 211 && BOOST_PP_ITERATION_FINISH_1 >= 211
# define BOOST_PP_ITERATION_1 211
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 212 && BOOST_PP_ITERATION_FINISH_1 >= 212
# define BOOST_PP_ITERATION_1 212
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 213 && BOOST_PP_ITERATION_FINISH_1 >= 213
# define BOOST_PP_ITERATION_1 213
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 214 && BOOST_PP_ITERATION_FINISH_1 >= 214
# define BOOST_PP_ITERATION_1 214
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 215 && BOOST_PP_ITERATION_FINISH_1 >= 215
# define BOOST_PP_ITERATION_1 215
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 216 && BOOST_PP_ITERATION_FINISH_1 >= 216
# define BOOST_PP_ITERATION_1 216
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 217 && BOOST_PP_ITERATION_FINISH_1 >= 217
# define BOOST_PP_ITERATION_1 217
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 218 && BOOST_PP_ITERATION_FINISH_1 >= 218
# define BOOST_PP_ITERATION_1 218
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 219 && BOOST_PP_ITERATION_FINISH_1 >= 219
# define BOOST_PP_ITERATION_1 219
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 220 && BOOST_PP_ITERATION_FINISH_1 >= 220
# define BOOST_PP_ITERATION_1 220
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 221 && BOOST_PP_ITERATION_FINISH_1 >= 221
# define BOOST_PP_ITERATION_1 221
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 222 && BOOST_PP_ITERATION_FINISH_1 >= 222
# define BOOST_PP_ITERATION_1 222
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 223 && BOOST_PP_ITERATION_FINISH_1 >= 223
# define BOOST_PP_ITERATION_1 223
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 224 && BOOST_PP_ITERATION_FINISH_1 >= 224
# define BOOST_PP_ITERATION_1 224
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 225 && BOOST_PP_ITERATION_FINISH_1 >= 225
# define BOOST_PP_ITERATION_1 225
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 226 && BOOST_PP_ITERATION_FINISH_1 >= 226
# define BOOST_PP_ITERATION_1 226
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 227 && BOOST_PP_ITERATION_FINISH_1 >= 227
# define BOOST_PP_ITERATION_1 227
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 228 && BOOST_PP_ITERATION_FINISH_1 >= 228
# define BOOST_PP_ITERATION_1 228
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 229 && BOOST_PP_ITERATION_FINISH_1 >= 229
# define BOOST_PP_ITERATION_1 229
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 230 && BOOST_PP_ITERATION_FINISH_1 >= 230
# define BOOST_PP_ITERATION_1 230
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 231 && BOOST_PP_ITERATION_FINISH_1 >= 231
# define BOOST_PP_ITERATION_1 231
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 232 && BOOST_PP_ITERATION_FINISH_1 >= 232
# define BOOST_PP_ITERATION_1 232
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 233 && BOOST_PP_ITERATION_FINISH_1 >= 233
# define BOOST_PP_ITERATION_1 233
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 234 && BOOST_PP_ITERATION_FINISH_1 >= 234
# define BOOST_PP_ITERATION_1 234
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 235 && BOOST_PP_ITERATION_FINISH_1 >= 235
# define BOOST_PP_ITERATION_1 235
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 236 && BOOST_PP_ITERATION_FINISH_1 >= 236
# define BOOST_PP_ITERATION_1 236
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 237 && BOOST_PP_ITERATION_FINISH_1 >= 237
# define BOOST_PP_ITERATION_1 237
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 238 && BOOST_PP_ITERATION_FINISH_1 >= 238
# define BOOST_PP_ITERATION_1 238
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 239 && BOOST_PP_ITERATION_FINISH_1 >= 239
# define BOOST_PP_ITERATION_1 239
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 240 && BOOST_PP_ITERATION_FINISH_1 >= 240
# define BOOST_PP_ITERATION_1 240
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 241 && BOOST_PP_ITERATION_FINISH_1 >= 241
# define BOOST_PP_ITERATION_1 241
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 242 && BOOST_PP_ITERATION_FINISH_1 >= 242
# define BOOST_PP_ITERATION_1 242
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 243 && BOOST_PP_ITERATION_FINISH_1 >= 243
# define BOOST_PP_ITERATION_1 243
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 244 && BOOST_PP_ITERATION_FINISH_1 >= 244
# define BOOST_PP_ITERATION_1 244
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 245 && BOOST_PP_ITERATION_FINISH_1 >= 245
# define BOOST_PP_ITERATION_1 245
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 246 && BOOST_PP_ITERATION_FINISH_1 >= 246
# define BOOST_PP_ITERATION_1 246
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 247 && BOOST_PP_ITERATION_FINISH_1 >= 247
# define BOOST_PP_ITERATION_1 247
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 248 && BOOST_PP_ITERATION_FINISH_1 >= 248
# define BOOST_PP_ITERATION_1 248
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 249 && BOOST_PP_ITERATION_FINISH_1 >= 249
# define BOOST_PP_ITERATION_1 249
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 250 && BOOST_PP_ITERATION_FINISH_1 >= 250
# define BOOST_PP_ITERATION_1 250
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 251 && BOOST_PP_ITERATION_FINISH_1 >= 251
# define BOOST_PP_ITERATION_1 251
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 252 && BOOST_PP_ITERATION_FINISH_1 >= 252
# define BOOST_PP_ITERATION_1 252
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 253 && BOOST_PP_ITERATION_FINISH_1 >= 253
# define BOOST_PP_ITERATION_1 253
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 254 && BOOST_PP_ITERATION_FINISH_1 >= 254
# define BOOST_PP_ITERATION_1 254
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 255 && BOOST_PP_ITERATION_FINISH_1 >= 255
# define BOOST_PP_ITERATION_1 255
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# if BOOST_PP_ITERATION_START_1 <= 256 && BOOST_PP_ITERATION_FINISH_1 >= 256
# define BOOST_PP_ITERATION_1 256
# include BOOST_PP_FILENAME_1
# undef BOOST_PP_ITERATION_1
# endif
# endif
#
# undef BOOST_PP_IS_ITERATING
#
# undef BOOST_PP_ITERATION_DEPTH
# define BOOST_PP_ITERATION_DEPTH() 0
#
# undef BOOST_PP_ITERATION_START_1
# undef BOOST_PP_ITERATION_FINISH_1
# undef BOOST_PP_FILENAME_1
#
# undef BOOST_PP_ITERATION_FLAGS_1
# undef BOOST_PP_ITERATION_PARAMS_1
| {
"pile_set_name": "Github"
} |
namespace UnityEngine.PostProcessing
{
    /// <summary>
    /// PropertyAttribute that carries the name of a property associated with a
    /// serialized field, plus a dirty flag.
    /// </summary>
    public sealed class GetSetAttribute : PropertyAttribute
    {
        // Name of the property this attribute points at (set once at construction).
        public readonly string name;

        // Mutable flag; presumably set by a property drawer when the value changes
        // and the property setter must be invoked. NOTE(review): semantics inferred
        // from the name only - confirm against the drawer that consumes this attribute.
        public bool dirty;

        public GetSetAttribute(string name)
        {
            this.name = name;
        }
    }
}
| {
"pile_set_name": "Github"
} |
#' @rdname checkfiles
#' @export
checkHTML <- function(files,
                      ...)
{
  # If no files were supplied, let the user pick them interactively.
  if (missing(files))
    files <- tcltk::tk_choose.files()

  # Extract the text content of each HTML file.
  txts <- sapply(files, getHTML)

  # Name each text after its file with the extension stripped.
  # fixed = TRUE matches the "." literally; without it the dot is a regex
  # wildcard, so e.g. "axhtml" in a basename would also have been removed.
  names(txts) <- gsub(".html", "", basename(files), fixed = TRUE)
  names(txts) <- gsub(".htm", "", names(txts), fixed = TRUE)

  # Run statcheck on the extracted texts, forwarding any extra arguments.
  return(statcheck(txts, ...))
}
| {
"pile_set_name": "Github"
} |
/*
 *  /MathJax/jax/element/mml/optable/Dingbats.js
 *
 *  Copyright (c) 2012 Design Science, Inc.
 *
 *  Part of the MathJax library.
 *  See http://www.mathjax.org for details.
 *
 *  Licensed under the Apache License, Version 2.0;
 *  you may not use this file except in compliance with the License.
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 */
// Minified (generated) loader: inserts operator-table entries for the Dingbats
// block - "\u2772" as a prefix OPEN delimiter and "\u2773" as a postfix CLOSE
// delimiter - into mml.mo's OPTABLE, then tells MathJax this optable file loaded.
(function(a){var c=a.mo.OPTYPES;var b=a.TEXCLASS;MathJax.Hub.Insert(a.mo.prototype,{OPTABLE:{prefix:{"\u2772":c.OPEN},postfix:{"\u2773":c.CLOSE}}});MathJax.Ajax.loadComplete(a.optableDir+"/Dingbats.js")})(MathJax.ElementJax.mml);
| {
"pile_set_name": "Github"
} |
Куцылло Константин | Frontend Developer, Яндекс
| {
"pile_set_name": "Github"
} |
// Load modules
var Dgram = require('dgram');
var Dns = require('dns');
var Hoek = require('hoek');
// Declare internals
var internals = {};
exports.time = function (options, callback) {
if (arguments.length !== 2) {
callback = arguments[0];
options = {};
}
var settings = Hoek.clone(options);
settings.host = settings.host || 'pool.ntp.org';
settings.port = settings.port || 123;
settings.resolveReference = settings.resolveReference || false;
// Declare variables used by callback
var timeoutId = 0;
var sent = 0;
// Ensure callback is only called once
var finish = function (err, result) {
if (timeoutId) {
clearTimeout(timeoutId);
timeoutId = 0;
}
socket.removeAllListeners();
socket.once('error', internals.ignore);
socket.close();
return callback(err, result);
};
finish = Hoek.once(finish);
// Create UDP socket
var socket = Dgram.createSocket('udp4');
socket.once('error', function (err) {
return finish(err);
});
// Listen to incoming messages
socket.on('message', function (buffer, rinfo) {
var received = Date.now();
var message = new internals.NtpMessage(buffer);
if (!message.isValid) {
return finish(new Error('Invalid server response'), message);
}
if (message.originateTimestamp !== sent) {
return finish(new Error('Wrong originate timestamp'), message);
}
// Timestamp Name ID When Generated
// ------------------------------------------------------------
// Originate Timestamp T1 time request sent by client
// Receive Timestamp T2 time request received by server
// Transmit Timestamp T3 time reply sent by server
// Destination Timestamp T4 time reply received by client
//
// The roundtrip delay d and system clock offset t are defined as:
//
// d = (T4 - T1) - (T3 - T2) t = ((T2 - T1) + (T3 - T4)) / 2
var T1 = message.originateTimestamp;
var T2 = message.receiveTimestamp;
var T3 = message.transmitTimestamp;
var T4 = received;
message.d = (T4 - T1) - (T3 - T2);
message.t = ((T2 - T1) + (T3 - T4)) / 2;
message.receivedLocally = received;
if (!settings.resolveReference ||
message.stratum !== 'secondary') {
return finish(null, message);
}
// Resolve reference IP address
Dns.reverse(message.referenceId, function (err, domains) {
if (/* $lab:coverage:off$ */ !err /* $lab:coverage:on$ */) {
message.referenceHost = domains[0];
}
return finish(null, message);
});
});
// Set timeout
if (settings.timeout) {
timeoutId = setTimeout(function () {
timeoutId = 0;
return finish(new Error('Timeout'));
}, settings.timeout);
}
// Construct NTP message
var message = new Buffer(48);
for (var i = 0; i < 48; i++) { // Zero message
message[i] = 0;
}
message[0] = (0 << 6) + (4 << 3) + (3 << 0) // Set version number to 4 and Mode to 3 (client)
sent = Date.now();
internals.fromMsecs(sent, message, 40); // Set transmit timestamp (returns as originate)
// Send NTP request
socket.send(message, 0, message.length, settings.port, settings.host, function (err, bytes) {
if (err ||
bytes !== 48) {
return finish(err || new Error('Could not send entire message'));
}
});
};
// Parse a raw 48-byte NTP packet into a structured message object.
// Sets this.isValid = true only when the packet parses as a version-4
// server-mode reply with non-reserved stratum and non-zero timestamps.
internals.NtpMessage = function (buffer) {

    this.isValid = false;

    // Validate: an NTP header without extensions is exactly 48 bytes

    if (buffer.length !== 48) {
        return;
    }

    // Leap indicator: top two bits of byte 0

    var li = (buffer[0] >> 6);
    switch (li) {
        case 0: this.leapIndicator = 'no-warning'; break;
        case 1: this.leapIndicator = 'last-minute-61'; break;
        case 2: this.leapIndicator = 'last-minute-59'; break;
        case 3: this.leapIndicator = 'alarm'; break;
    }

    // Version: bits 3-5 of byte 0

    var vn = ((buffer[0] & 0x38) >> 3);
    this.version = vn;

    // Mode: bottom three bits of byte 0

    var mode = (buffer[0] & 0x7);
    switch (mode) {
        case 1: this.mode = 'symmetric-active'; break;
        case 2: this.mode = 'symmetric-passive'; break;
        case 3: this.mode = 'client'; break;
        case 4: this.mode = 'server'; break;
        case 5: this.mode = 'broadcast'; break;
        case 0:
        case 6:
        case 7: this.mode = 'reserved'; break;
    }

    // Stratum: byte 1 (0 = kiss-o'-death, 1 = primary, 2-15 = secondary)

    var stratum = buffer[1];
    if (stratum === 0) {
        this.stratum = 'death';
    }
    else if (stratum === 1) {
        this.stratum = 'primary';
    }
    else if (stratum <= 15) {
        this.stratum = 'secondary';
    }
    else {
        this.stratum = 'reserved';
    }

    // Poll interval (msec): byte 2 is a log2 value in seconds

    this.pollInterval = Math.round(Math.pow(2, buffer[2])) * 1000;

    // Precision (msecs): byte 3 is a (typically negative) log2 value in seconds

    this.precision = Math.pow(2, buffer[3]) * 1000;

    // Root delay (msecs): bytes 4-7 are a 16.16 fixed-point value in seconds

    var rootDelay = 256 * (256 * (256 * buffer[4] + buffer[5]) + buffer[6]) + buffer[7];
    this.rootDelay = 1000 * (rootDelay / 0x10000);

    // Root dispersion (msecs): bytes 8-11, also 16.16 fixed-point seconds

    this.rootDispersion = ((buffer[8] << 8) + buffer[9] + ((buffer[10] << 8) + buffer[11]) / Math.pow(2, 16)) * 1000;

    // Reference identifier: bytes 12-15; a 4-char code for primary/death
    // sources, a dotted IPv4 address for secondary sources

    this.referenceId = '';
    switch (this.stratum) {
        case 'death':
        case 'primary':
            this.referenceId = String.fromCharCode(buffer[12]) + String.fromCharCode(buffer[13]) + String.fromCharCode(buffer[14]) + String.fromCharCode(buffer[15]);
            break;
        case 'secondary':
            this.referenceId = '' + buffer[12] + '.' + buffer[13] + '.' + buffer[14] + '.' + buffer[15];
            break;
    }

    // Reference timestamp (Unix msecs, converted from NTP format)

    this.referenceTimestamp = internals.toMsecs(buffer, 16);

    // Originate timestamp

    this.originateTimestamp = internals.toMsecs(buffer, 24);

    // Receive timestamp

    this.receiveTimestamp = internals.toMsecs(buffer, 32);

    // Transmit timestamp

    this.transmitTimestamp = internals.toMsecs(buffer, 40);

    // Validate: only a well-formed v4 server reply with all three working
    // timestamps present (non-zero) is usable for offset computation

    if (this.version === 4 &&
        this.stratum !== 'reserved' &&
        this.mode === 'server' &&
        this.originateTimestamp &&
        this.receiveTimestamp &&
        this.transmitTimestamp) {

        this.isValid = true;
    }

    return this;
};
// Convert an 8-byte NTP timestamp at `offset` in `buffer` (big-endian 32-bit
// seconds followed by 32-bit fraction, epoch 1900-01-01) to Unix milliseconds.
internals.toMsecs = function (buffer, offset) {

    var seconds = 0;
    var fraction = 0;
    var i = 0;

    while (i < 4) {
        seconds = seconds * 256 + buffer[offset + i];
        ++i;
    }

    while (i < 8) {
        fraction = fraction * 256 + buffer[offset + i];
        ++i;
    }

    // 2208988800 = seconds between the NTP epoch (1900) and the Unix epoch (1970)

    var unixSeconds = seconds - 2208988800 + (fraction / Math.pow(2, 32));
    return unixSeconds * 1000;
};
// Write a Unix-milliseconds value `ts` into `buffer` at `offset` as an 8-byte
// NTP timestamp: big-endian 32-bit seconds (epoch 1900) plus 32-bit fraction.
internals.fromMsecs = function (ts, buffer, offset) {

    // 2208988800 shifts from the Unix epoch (1970) back to the NTP epoch (1900)

    var seconds = Math.floor(ts / 1000) + 2208988800;
    var fraction = Math.round((ts % 1000) / 1000 * Math.pow(2, 32));

    // Store one 32-bit value big-endian starting at byte index `at`

    var writeWord = function (value, at) {

        buffer[at] = (value & 0xFF000000) >> 24;
        buffer[at + 1] = (value & 0x00FF0000) >> 16;
        buffer[at + 2] = (value & 0x0000FF00) >> 8;
        buffer[at + 3] = (value & 0x000000FF);
    };

    writeWord(seconds, offset);
    writeWord(fraction, offset + 4);
};
// Offset singleton
// Caches the most recent offset measurement so repeated exports.offset() calls
// can skip the network round-trip until the cache expires.

internals.last = {
    offset: 0,      // last measured clock offset in msecs
    expires: 0,     // epoch msecs after which the cached offset is stale
    host: '',       // NTP host the cached offset was measured against
    port: 0         // NTP port the cached offset was measured against
};
exports.offset = function (options, callback) {
if (arguments.length !== 2) {
callback = arguments[0];
options = {};
}
var now = Date.now();
var clockSyncRefresh = options.clockSyncRefresh || 24 * 60 * 60 * 1000; // Daily
if (internals.last.offset &&
internals.last.host === options.host &&
internals.last.port === options.port &&
now < internals.last.expires) {
process.nextTick(function () {
callback(null, internals.last.offset);
});
return;
}
exports.time(options, function (err, time) {
if (err) {
return callback(err, 0);
}
internals.last = {
offset: Math.round(time.t),
expires: now + clockSyncRefresh,
host: options.host,
port: options.port
};
return callback(null, internals.last.offset);
});
};
// Now singleton
// Holds the timer handle for the background clock-sync loop started by
// exports.start(); 0 when the loop is not running.

internals.now = {
    intervalId: 0
};
// Start the background clock-sync loop: measure the offset once now, then
// refresh it every options.clockSyncRefresh msecs (default: 24h). Idempotent -
// a second call while live just invokes the callback.
//
// options: same as exports.offset().
// callback() - invoked once the initial offset attempt completes.

exports.start = function (options, callback) {

    // Support start(callback) with defaulted options

    if (arguments.length !== 2) {
        callback = arguments[0];
        options = {};
    }

    // Already live: nothing to do, report success asynchronously

    if (internals.now.intervalId) {
        process.nextTick(function () {

            callback();
        });

        return;
    }

    // Prime the cache, then keep it warm on an interval.
    // NOTE(review): `err` from the initial offset fetch is ignored and the
    // callback is invoked without it - this looks like deliberate best-effort
    // behavior (the interval will retry), but confirm callers don't need the error.

    exports.offset(options, function (err, offset) {

        internals.now.intervalId = setInterval(function () {

            // Periodic refresh; errors are intentionally dropped (best-effort)

            exports.offset(options, function () { });
        }, options.clockSyncRefresh || 24 * 60 * 60 * 1000);                    // Daily

        return callback();
    });
};
exports.stop = function () {
if (!internals.now.intervalId) {
return;
}
clearInterval(internals.now.intervalId);
internals.now.intervalId = 0;
};
exports.isLive = function () {
return !!internals.now.intervalId;
};
exports.now = function () {
var now = Date.now();
if (!exports.isLive() ||
now >= internals.last.expires) {
return now;
}
return now + internals.last.offset;
};
// No-op error listener: attached to the socket at teardown so late errors
// raised during close don't crash the process after the callback has fired.
internals.ignore = function () {
};
| {
"pile_set_name": "Github"
} |
//
//     Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
//     class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//

// Reverse-engineered declaration produced by class-dump; regenerate rather than
// hand-editing.
@protocol PPSpecifierExtensionHosting

// Takes a completion block. NOTE(review): "ping" semantics are inferred from
// the selector name only - no implementation is visible in this dump.
- (void)___nsx_pingHost:(void (^)(void))arg1;
@end
| {
"pile_set_name": "Github"
} |
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="../../assets/img/favicon-144.png">
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="../../assets/img/favicon-144.png">
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="../../assets/img/favicon-72.png">
<link rel="apple-touch-icon-precomposed" href="../../assets/img/favicon-32.png">
<link rel="shortcut icon" href="../../assets/img/favicon-32.png">
<link rel="stylesheet" href="../../assets/css/vk.css"/>
<link rel="stylesheet" href="../../assets/css/prism.css"/>
<vk-title>vkCmdDebugMarkerEndEXT | NVK</vk-title>
<vk-centered>
<vk-navigation>
<vk-search-title>Search</vk-search-title>
<vk-search>
<input type="text" id="search" autocomplete="off" />
<vk-search-results>
<ol id="search-list">
<li id="no-search-results">No Results</li>
</ol>
</vk-search-results>
</vk-search>
<vk-section-title style="margin-top: 1em;">Categories</vk-section-title>
<vk-categories></vk-categories>
</vk-navigation>
<vk-struct>
<vk-name>vkCmdDebugMarkerEndEXT</vk-name>
<vk-description>Close a command buffer marker region</vk-description>
<vk-section-title>Syntax</vk-section-title>
<vk-syntax>
<pre><code class="language-js">void vkCmdDebugMarkerEndEXT();
</code></pre>
</vk-syntax>
<vk-section-title>Parameters</vk-section-title>
<vk-properties>
<vk-property-entry>
<vk-property-parameter>commandBuffer</vk-property-parameter>
<vk-property-type type="object"><a href="../handles/VkCommandBuffer.html">VkCommandBuffer</a></vk-property-type>
<vk-property-description> is the command buffer into which the command is recorded.</vk-property-description>
</vk-property-entry>
</vk-properties>
</vk-struct>
</vk-centered>
<script>
const IS_ROOT = false;
</script>
<script type="text/javascript" src="../../assets/js/prism.min.js"></script>
<script type="text/javascript" src="../../assets/js/index.js"></script>
| {
"pile_set_name": "Github"
} |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
package v1beta1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (*PartialObjectMetadataList) ProtoMessage() {}
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
return fileDescriptor_90ec10f86b91f9a8, []int{0}
}
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
}
func (m *PartialObjectMetadataList) XXX_Size() int {
return m.Size()
}
func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
}
var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
func init() {
proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList")
}
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8)
}
var fileDescriptor_90ec10f86b91f9a8 = []byte{
// 321 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30,
0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0,
0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6,
0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e,
0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e,
0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1,
0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c,
0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d,
0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25,
0xda, 0xee, 0x47, 0x12, 0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90,
0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf,
0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b,
0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00,
0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9,
0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 0x2f, 0x5f,
0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe,
0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71,
0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29,
0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb,
0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00,
0x00,
}
// Marshal serializes the message into a freshly allocated buffer.
// Generated gogo/protobuf code: the buffer is sized up front via Size() and
// filled back-to-front by MarshalToSizedBuffer.
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	// n is the number of bytes written at the tail of the buffer.
	return dAtA[:n], nil
}
// MarshalTo serializes the message into the caller-supplied buffer, which
// must be at least Size() bytes long. Returns the number of bytes written.
func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes the message into the tail of dAtA,
// writing fields in reverse field-number order from the end of the buffer
// toward the front (the standard gogo/protobuf scheme). Returns the number
// of bytes written.
func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	// i is the write cursor; it starts at the end and moves backward.
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Field 2: ListMeta (length-delimited message, tag byte 0x12).
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x12
	// Field 1: repeated Items (length-delimited messages, tag byte 0xa),
	// emitted in reverse so they appear in order in the final buffer.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf base-128 varint ending just
// before offset (i.e. the varint occupies the sovGenerated(v) bytes preceding
// offset) and returns the new, smaller offset at which the varint begins.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	base := offset
	for v >= 1<<7 {
		// Low 7 bits plus continuation bit.
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the exact number of bytes the wire-format encoding of m will
// occupy: each field contributes 1 tag byte plus a length varint plus its
// own encoded size.
func (m *PartialObjectMetadataList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// sovGenerated returns the number of bytes needed to encode x as a base-128
// varint (the |1 guarantees at least one byte for x == 0).
func sovGenerated(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
// sozGenerated returns the varint-encoded size of x after zig-zag encoding
// (used for sint32/sint64 fields, mapping signed values to unsigned).
func sozGenerated(x uint64) (n int) {
	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// String returns a human-readable, debug-oriented rendering of the list,
// mirroring the Go-syntax style used by other generated Stringers.
func (this *PartialObjectMetadataList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]PartialObjectMetadata{"
	for _, f := range this.Items {
		repeatedStringForItems += fmt.Sprintf("%v", f) + ","
	}
	repeatedStringForItems += "}"
	// Rewrites "ListMeta" to its package-qualified name and strips the
	// leading "&" from the embedded struct's default formatting.
	s := strings.Join([]string{`&PartialObjectMetadataList{`,
		`Items:` + repeatedStringForItems + `,`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringGenerated formats a possibly-nil pointer for debug output,
// dereferencing it when non-nil.
// NOTE(review): reflect.Value.IsNil panics for non-nilable kinds; generated
// callers appear to pass only pointers — confirm before reusing elsewhere.
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// appending decoded entries to any existing Items. Unknown fields are
// skipped; truncated or malformed input yields an error.
func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 (end-group) is invalid outside a group.
			return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated Items (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			// Decode the message length varint.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow of iNdEx + msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero value and unmarshal into it in place.
			m.Items = append(m.Items, v1.PartialObjectMetadata{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: ListMeta (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the wire-format
// field starting at dAtA[0] (its tag varint was already consumed by the
// caller is NOT the case here — the tag is read below), so that unknown
// fields can be skipped. It handles all wire types, including nested groups.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Read the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint: skip bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Length-delimited: read the length varint, then skip that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
			if iNdEx < 0 {
				// Overflow of iNdEx + length.
				return 0, ErrInvalidLengthGenerated
			}
			return iNdEx, nil
		case 3:
			// Start-group: recursively skip inner fields until the matching
			// end-group (wire type 4) is seen.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowGenerated
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipGenerated(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
				if iNdEx < 0 {
					return 0, ErrInvalidLengthGenerated
				}
			}
			return iNdEx, nil
		case 4:
			// Bare end-group: nothing further to skip.
			return iNdEx, nil
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}
// Sentinel errors returned by the generated (un)marshaling helpers above.
var (
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
)
| {
"pile_set_name": "Github"
} |
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"net"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/pkg/errors"
)
// IPConfig describes a single IP assignment on a network interface: the
// address itself and the gateway the plugin associated with it.
type IPConfig struct {
	IP      net.IP
	Gateway net.IP
}
// CNIResult is the aggregated, structured view of one or more CNI plugin
// results: per-interface configuration keyed by interface name, plus the
// combined DNS settings and routes from all results.
type CNIResult struct {
	Interfaces map[string]*Config
	DNS        []types.DNS
	Routes     []*types.Route
}
// Config holds the configuration recorded for one interface: its IP
// assignments, hardware (MAC) address, and the sandbox (if any) it lives in.
type Config struct {
	IPConfigs []*IPConfig
	Mac       string
	Sandbox   string
}
// GetCNIResultFromResults returns structured data containing the
// interface configuration for each of the interfaces created in the namespace.
// Conforms with the CNI spec's Result:
//  a) Interfaces list. Depending on the plugin, this can include the sandbox
//     (eg, container or hypervisor) interface name and/or the host interface
//     name, the hardware addresses of each interface, and details about the
//     sandbox (if any) the interface is in.
//  b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses,
//     gateways, and routes assigned to sandbox and/or host interfaces.
//  c) DNS information. Dictionary that includes DNS information for nameservers,
//     domain, search domains and options.
func (c *libcni) GetCNIResultFromResults(results []*current.Result) (*CNIResult, error) {
	r := &CNIResult{
		Interfaces: make(map[string]*Config),
	}
	// Plugins are not required to report Interfaces when only a single
	// interface is created. In that case all configs are applied against
	// the default interface.
	r.Interfaces[defaultInterface(c.prefix)] = &Config{}
	// Walk through all the results.
	for _, result := range results {
		// Record every interface reported in this result.
		for _, intf := range result.Interfaces {
			r.Interfaces[intf.Name] = &Config{
				Mac:     intf.Mac,
				Sandbox: intf.Sandbox,
			}
		}
		// Attach each IP in the result to its corresponding interface.
		for _, ipConf := range result.IPs {
			if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil {
				return nil, errors.Wrapf(ErrInvalidResult, "failed to validate interface config: %v", err)
			}
			name := c.getInterfaceName(result.Interfaces, ipConf)
			r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs,
				&IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway})
		}
		r.DNS = append(r.DNS, result.DNS)
		r.Routes = append(r.Routes, result.Routes...)
	}
	// Sanity check: the default interface entry must still be present.
	if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok {
		return nil, errors.Wrapf(ErrNotFound, "default network not found")
	}
	return r, nil
}
// getInterfaceName resolves which interface an IP configuration belongs to.
// When the plugin result carries an interface index, the name of that
// interface is returned; otherwise the default interface name is used.
func (c *libcni) getInterfaceName(interfaces []*current.Interface,
	ipConf *current.IPConfig) string {
	if ipConf.Interface == nil {
		return defaultInterface(c.prefix)
	}
	return interfaces[*ipConf.Interface].Name
}
| {
"pile_set_name": "Github"
} |
/*
* COPYRIGHT (C) 2017-2019, zhllxt
*
* author : zhllxt
* email : [email protected]
*
* Distributed under the GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
* (See accompanying file LICENSE or see <http://www.gnu.org/licenses/>)
*/
#ifndef __ASIO2_SELECTOR_HPP__
#define __ASIO2_SELECTOR_HPP__
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
#pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <asio2/config.hpp>
// When standalone Asio is selected, force header-only mode so no separate
// Asio library needs to be linked.
#ifdef ASIO_STANDALONE
#	ifndef ASIO_HEADER_ONLY
#		define ASIO_HEADER_ONLY
#	endif
#endif
// --- Compiler warning suppression for the third-party headers below -------
#ifdef _MSC_VER
#	pragma warning(push)
#	pragma warning(disable:4311)
#	pragma warning(disable:4312)
#	pragma warning(disable:4996)
#endif
#if defined(__GNUC__) || defined(__GNUG__)
#	pragma GCC diagnostic push
#	pragma GCC diagnostic ignored "-Wunused-variable"
#	pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#if defined(__clang__)
#	pragma clang diagnostic push
#	pragma clang diagnostic ignored "-Wunused-variable"
#	pragma clang diagnostic ignored "-Wexceptions"
#	pragma clang diagnostic ignored "-Wdeprecated-declarations"
#	pragma clang diagnostic ignored "-Wunused-private-field"
#	pragma clang diagnostic ignored "-Wunused-local-typedef"
#	pragma clang diagnostic ignored "-Wunknown-warning-option"
#endif
// --- Select standalone Asio vs Boost.Asio, and mirror the version macro ---
#ifdef ASIO_STANDALONE
#include <asio/asio.hpp>
#if defined(ASIO2_USE_SSL)
#include <asio/ssl.hpp>
#endif
#ifndef BOOST_ASIO_VERSION
#define BOOST_ASIO_VERSION ASIO_VERSION
#endif
#else
#include <boost/asio.hpp>
#if defined(ASIO2_USE_SSL)
#include <boost/asio/ssl.hpp>
#endif
#ifndef ASIO_VERSION
#define ASIO_VERSION BOOST_ASIO_VERSION
#endif
#endif // ASIO_STANDALONE
// --- Select standalone Beast vs Boost.Beast, mirroring version macros -----
#ifdef BEAST_HEADER_ONLY
#include <beast/beast.hpp>
#if defined(ASIO2_USE_SSL)
// boost 1.72(107200) BOOST_BEAST_VERSION 277
#if defined(BEAST_VERSION) && (BEAST_VERSION >= 277)
#include <beast/ssl.hpp>
#endif
#include <beast/websocket/ssl.hpp>
#endif
#ifndef BOOST_BEAST_VERSION
#define BOOST_BEAST_VERSION BEAST_VERSION
#endif
#ifndef BOOST_BEAST_VERSION_STRING
#define BOOST_BEAST_VERSION_STRING BEAST_VERSION_STRING
#endif
#else
#include <boost/beast.hpp>
#if defined(ASIO2_USE_SSL)
// boost 1.72(107200) BOOST_BEAST_VERSION 277
#if defined(BOOST_BEAST_VERSION) && (BOOST_BEAST_VERSION >= 277)
#include <boost/beast/ssl.hpp>
#endif
#include <boost/beast/websocket/ssl.hpp>
#endif
#ifndef BEAST_VERSION
#define BEAST_VERSION BOOST_BEAST_VERSION
#endif
#ifndef BEAST_VERSION_STRING
#define BEAST_VERSION_STRING BOOST_BEAST_VERSION_STRING
#endif
#endif // BEAST_HEADER_ONLY
// --- Namespace aliases so the rest of asio2 can use `asio::` uniformly ----
#ifdef ASIO_STANDALONE
//namespace asio = ::asio;
#else
// In Boost mode, expose error_code/system_error under boost::asio so the
// alias below gives the same spelling as standalone Asio.
namespace boost::asio
{
	using error_code = ::boost::system::error_code;
	using system_error = ::boost::system::system_error;
}
namespace asio = ::boost::asio;
// [ adding definitions to namespace alias ]
// This is currently not allowed and probably won't be in C++1Z either,
// but note that a recent proposal is allowing
// https://stackoverflow.com/questions/31629101/adding-definitions-to-namespace-alias?r=SearchResults
//namespace asio
//{
//	using error_code = ::boost::system::error_code;
//	using system_error = ::boost::system::system_error;
//}
#endif // ASIO_STANDALONE
#ifdef BEAST_HEADER_ONLY
#else
namespace beast = ::boost::beast;
#endif // BEAST_HEADER_ONLY
namespace http = ::beast::http;
namespace websocket = ::beast::websocket;
// Convenience aliases re-exported under the asio2 namespace.
namespace asio2
{
	using error_code = ::asio::error_code;
	using system_error = ::asio::system_error;
	namespace http = ::beast::http;
	namespace websocket = ::beast::websocket;
}
// --- Restore the warning state pushed above -------------------------------
#if defined(__clang__)
#	pragma clang diagnostic pop
#endif
#if defined(__GNUC__) || defined(__GNUG__)
#	pragma GCC diagnostic pop
#endif
#if defined(_MSC_VER)
#	pragma warning(pop)
#endif
#endif // !__ASIO2_SELECTOR_HPP__
| {
"pile_set_name": "Github"
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats_univariates.R
\name{GetSigTable.FC}
\alias{GetSigTable.FC}
\title{Sig Table for Fold-Change Analysis}
\usage{
GetSigTable.FC(mSetObj = NA)
}
\arguments{
\item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)}
}
\description{
Sig Table for Fold-Change Analysis
}
| {
"pile_set_name": "Github"
} |
== Predicate Rule System
TBD: Fix links.
TBD: Add advantages/disadvantages table.
The Predicate Rule system provides an alternative rule injection system for IronBee. That is, it provides an alternative way to determine which rules should fire in a phase and does not change the metadata or actions available to rules. The system associates a predicate *expression* with a rule and fires the rule if and only if the predicate expression evaluates to true. The major advantages over the traditional rule system are:
Composition:: Predicate allows composition of expressions. Composition allows logic to be shared among rules. It also allows easy use of boolean operators such as 'and' and 'or' to compose logic.
Performance:: Common sub-expressions are merged, including across rules in the same phase. Such merging can have significant performance benefits, especially in rule sets with significant common logic. In addition, evaluation that can be done at configuration time is done so. This pre-evaluation is especially useful for implementing policy logic efficiently.
Domain specific language:: The expression syntax is oriented at easy machine generation and the use of domain specific languages for generating it. While it is possible to write predicate expressions directly, support in higher level languages such as Lua can facilitate expression of rule logic.
The major disadvantages over the traditional rule system are:
Order Independence:: Within a phase, no order guarantees are provided for rule firings. There is no way to specify that a rule should fire before another rule.
Side Effects:: Side effects of rules, such as setting variables, may not be visible to other predicate rules within the same phase.
Feature Delay:: The predicate system does provide mechanisms to call any operator defined for IronBee, however, as operators are written for the traditional rule system, they may have features that do not map into the predicate rule system. As such, features introduced in IronBee or from other modules may not be initially available in the predicate system.
The predicate system is conceptually divided into three parts:
Front End:: The front end is a domain specific language for generating predicate expressions. The front end is concerned with providing an easy and expressive syntax.
Graph:: The graph component gathers all predicate expressions and merges common sub-expression, forming a directed acyclic graph (DAG). It then executes a variety of validation and transformations on the DAG. Each input predicate expression corresponds to a *root* node in the DAG.
Evaluation:: At each transaction, all nodes in the DAG for that phase are evaluated. For each root node that is true, the corresponding rules are injected.
=== Expressions
Predicate expressions are trees. The top of the tree, the "root", is usually associated with a rule: the rule will fire if and only if the associated root is true. The leaves of the tree are usually literals. The intermediate nodes represent function calls.
Predicate expressions can be created via frontends which can have a variety of forms. The canonical expression of an expression, however, is via an s-expression (sexpr). E.g.,
----
(and (gt 1000 (length (var 'Content-Length'))) (eq 'GET' (var 'Request-Method')))
----
Each parenthetical sub-expression represents a function call with the name of the function followed by the arguments. For a complete discussion of the expression grammar and semantics, see (link:reference.txt[]) (link:reference.html[HTML]).
=== Implementation Details
TBD: Need to reorganize this a bit.
==== Action: +predicate+
The +predicate+ action indicates that a rule should be handled by predicate. The argument to the action should be an s-expression to attach the rule. The rule will be fired if and only if the s-expression is true.
Predicate rules do not need to have a phase specified. If a phase is specified, the rule is fired only in that phase. If no phase is specified, the rule will fire in the earliest phase in which the s-expression is true.
Predicate rules should not depend on targets or operators: they should always fire. In Waggle, use the +Predicate+ rule type. In the config rule language, use the +Action+ directive.
Predicate rules can (and probably should) use other actions that indicate what should happen when the rule fires. However, they cannot use actions that cause them to be owned by other rule injection systems (e.g., +fast+).
==== Action: +set_predicate_vars+
The +set_predicate_vars+ action causes the Predicate engine to set two variables before later actions fire. These variables are only valid for the actions in this rule.
+PREDICATE_VALUE+:: The value of the root node.
+PREDICATE_VALUE_NAME+:: The name of the value of the root node.
Warning: `set_predicate_vars` has mild performance cost and should only be used when needed.
TBD: Cleanup links below.
For an overview and discussion of the system, see link:introduction.html[introduction]. For a comprehensive reference to the language, see link:reference.html[reference]. For details on the modules, see link:modules.html[modules].
=== Introduction
TBD: Incorporate this redundant section into the above.
Predicate associates a predicate 'expression' with a rule and fires the rule if and only if the predicate expression evaluates to true. In comparison to the default rule system, Predicate has superior support for composition and configuration time optimization; on the downside, it does not support rule ordering and can be unintuitive.
SExpressions (sexprs) are the underlying language of Predicate. It is not expected that you, a rule writer, will write SExpressions directly. However, you will see them in error messages and tools and is worthwhile to be familiar with them. As such, for most Lua code in this document, I have added Lua comments containing the equivalent SExpression, e.g.,
.Example: Equivalent SExpression
----
P.Gt(1000, P.Length(P.Var('REQUEST_URI')))
-- (gt 1000 (length (var 'REQUEST_URI')))
----
The second line is the S-Expression corresponding to the first line, prefixed with the Lua comment designator, `--`.
=== Orientation
==== Terminology
Top-Level Expression::
An expression associated with a rule. The rule will fire if and when the expression becomes truthy.
Truthy::
An expression or value that is interpreted as true.
Falsy::
An expression or value that is interpreted as false.
Waggle::
A Lua interface to writing IronBee rules.
Frontend::
A Lua interface to writing Predicate expressions.
SExpr::
The low level representation of Predicate expressions.
==== First Steps
Let's begin with a series of basic examples. Consider the following logic:
[quote]
If the URI of the request is really long, then ...
Interpreting ``is really long'' to mean ``has a length greater than 1000'', we can write this in the frontend as:
.Example: First Steps 1
----
P.Gt(1000, P.Length(P.Var('REQUEST_URI')))
-- (gt 1000 (length (var 'REQUEST_URI')))
----
This is Lua code. `P.Gt()` is a Lua function that produces a 'predicate object' from its arguments, which are in turn predicate objects (or Lua literals). The Waggle `predicate()` directive understands predicate objects and turns them into sexprs to pass on to Predicate, e.g.,
.Example: Waggle Rule
----
Rule("predicate_example", "1"):
predicate(
P.Gt(1000, P.Length(P.Var('REQUEST_URI')))
):
phase([[REQUEST_HEADER]]):
action([[clipp_announce:predicate_example]])
----
Predicate expressions are built up by composing 'predicate functions' along with literals.
Let's extend our logic to:
[quote]
If the URI of the request is really long and the request is a GET request, then ...
.Example: First Steps 2
----
P.And(
P.Gt(1000, P.Length(P.Var('REQUEST_URI'))),
P.Eq('GET', P.Var('REQUEST_METHOD'))
)
-- (and
-- (gt 1000 (length (var 'REQUEST_URI')))
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- )
----
The frontend provides some additional interfaces to more easily express certain patterns. In particular, it allows using the `+` operator for logical AND. This changes our expression to:
.Example: First Steps 3
----
P.Gt(1000, P.Length(P.Var('REQUEST_URI')))
+ P.Eq('GET', P.Var('REQUEST_METHOD'))
-- (and
-- (gt 1000 (length (var 'REQUEST_URI')))
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- )
----
The frontend also allows us to use object method syntax, where the object is passed in to the function as the last argument:
.Example: First Steps 4
----
P.Var('REQUEST_URI'):length():gt(1000)
+ P.Var('REQUEST_METHOD'):eq('GET')
-- (and
-- (gt 1000 (length (var 'REQUEST_URI')))
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- )
----
When and whether to use such shortcuts is a matter of style. Use them if you believe they make the logic clearer.
==== Second Steps
Let's look for a suspicious filename in every parameter:
.Example: Second Steps 1
----
P.FOperator('rx', '/etc/(?:passwd|shadow)', P.Var('ARGS'))
-- (foperator 'rx' '/etc/(?:passwd|shadow)' (var 'ARGS'))
----
`P.FOperator()` is an example of using an IronBee operator. IronBee operators are functions provided by modules that can be used by any rule system, not just Predicate.
[NOTE]
See <<s.operator_and_foperator,Operator and FOperator>> for discussion on why `P.FOperator()` is used here.
Now let's limit to only GET and POST requests:
.Example: Second Steps 2
----
P.And(
P.FOperator('rx', '/etc/(?:passwd|shadow)', P.Var('ARGS')),
P.Or(
P.Eq('GET', P.Var('REQUEST_METHOD')),
P.Eq('POST', P.Var('REQUEST_METHOD'))
)
)
-- (and
-- (foperator 'rx' '/etc/(?:passwd|shadow)' (var 'ARGS'))
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (eq 'POST' (var 'REQUEST_METHOD'))
-- )
-- )
----
There is a shortcut for logical OR, `/`. Using that and our other alternatives:
.Example: Second Steps 3
----
P.Var('ARGS'):foperator('rx', '/etc/(?:passwd|shadow)')
+ (
P.Var('REQUEST_METHOD'):eq('GET')
/ P.Var('REQUEST_METHOD'):eq('POST')
)
-- (and
-- (foperator 'rx' '/etc/(?:passwd|shadow)' (var 'ARGS'))
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (eq 'POST' (var 'REQUEST_METHOD'))
-- )
-- )
----
[[s.composition]]
==== Composition
A primary motivation for Predicate is to allow easy composition of rule logic. The previous examples have not directly taken advantage of that. Since we are writing our Predicate expressions in Lua when can make use of Lua features such as variables and functions to compose logic.
Let's factor out some common pieces of logic, such as ``is a GET request'':
.Example: `IsGet`
----
local IsGet = P.Var('REQUEST_METHOD'):eq('GET')
-- (eq 'GET' (var 'REQUEST_METHOD'))
----
And ``is a POST request'':
.Example: `IsPost`
----
local IsPost = P.Var('REQUEST_METHOD'):eq('POST')
-- (eq 'POST' (var 'REQUEST_METHOD'))
----
The example from the previous section then becomes:
.Example: Composition
----
P.Var('ARGS'):foperator('rx', '/etc/(?:passwd|shadow)')
+ (IsGet / IsPost)
-- (and
-- (foperator 'rx' '/etc/(?:passwd|shadow)' (var 'ARGS'))
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (eq 'POST' (var 'REQUEST_METHOD'))
-- )
-- )
----
Note how the use of intermediate Lua variables to hold pieces of expressions does not affect the resulting sexpr. I.e., this sort of composition is at the Lua level and happens before conversion to an sexpr. For a way to do composition post-sexpr, see <<s.templates,Templates>>.
We are not limited to variables. Consider:
[quote]
Header X is longer than 1000 bytes.
First, let's define a function to find the value of the ``Header X'':
.Example: `RequestHeader`
----
local function RequestHeader(which)
return P.Sub(which, P.Var('REQUEST_HEADERS'))
end
----
This function takes the name of a header and provides a predicate object representing the value of that header. It uses a new function, `P.Sub()`, which is used to select a specific member from a collection.
We can now use `RequestHeader()` to define a notion of a long header:
.Example: `LongHeader`
----
local function LongHeader(which)
return RequestHeader(which):length():gt(1000)
end
----
We can now use `LongHeader()` to express:
[quote]
The Host header is longer than 1000 bytes.
.Example: `LongHeader` usage
----
LongHeader('Host')
-- (gt 1000 (length (sub 'Host' (var 'REQUEST_HEADERS'))))
----
There is additional value to reusing pieces of logic. Predicate automatically detects any reused expressions across all Predicate expressions and only evaluates them once, reusing the result. This reuse can provide significant performance benefits.
==== Configuration Time
IronBee operates at two different times. At configuration time, it interprets its configuration and sets up any data structures it needs to evaluate traffic. At runtime (also called evaluation time), it interprets web traffic, determines which rules should be fired (involves evaluating predicate expressions), and fires those rules.
When using Predicate, there is a further distinction to be made at configuration time. There is computation that occurs in Lua and computation that occurs in Predicate. In Lua, the Lua code is executed to produce predicate objects which are turned into sexprs. Those sexprs are then passed to Predicate. Predicate merges all sexprs together and, once it has everything, performs validation and optimization passes.
This division has a number of implications. Two important ones are:
1. Some warnings and errors occur at the close of a configuration context and are in terms of sexprs rather than Lua code. In most cases, the Lua file and line number are provided with the error message.
2. Since Lua based composition is performed in Lua, the resulting SExprs that are communicated to Predicate can become quite large.
The use of <<s.templates,Templates>> can alleviate both of these problems.
Many Predicate functions support configuration time evaluation if all of their arguments are known at configuration time. For example, consider setting a policy variable in Lua:
.Example: Policy Variable
----
-- Change this to true to apply rule to Post requests.
local ApplyToPost = false
----
And then using it in a predicate expression, where `something_complicated` is some complex logic:
.Example: Using a Policy Variable
----
(IsGet / (ApplyToPost + IsPost)) + something_complicated
-- (and
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (and (false) (eq 'POST' (var 'REQUEST_METHOD')))
-- )
-- something_complicated
-- )
----
Since `ApplyToPost` is false, this expressions will always be false, no matter what `something_complicated` turns out to be. Predicate understands this and transforms the entire expression to false at configuration time. These transformations allows for easy configuration or customization of rules while paying the performance cost only once, at configuration time.
==== Expressions and the DAG
Any predicate expression can be represented as a tree. For example:
.Example: Expression 1
----
P.Var('ARGS'):foperator('rx', '/etc/(?:passwd|shadow)')
+ (IsGet / IsPost)
-- (and
-- (foperator 'rx' '/etc/(?:passwd|shadow)' (var 'ARGS'))
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (eq 'POST' (var 'REQUEST_METHOD'))
-- )
-- )
----
Corresponds to:
.Expression 1 as Tree
image::images/guide_1.png[Expression 1 as Tree]
(All of the images in this section were generated via the <<s.pp_dot,`pp_dot`>> tool.)
The DAG (directed acyclic graph) is the heart of Predicate. It is initially generated by taking the trees from the predicate expressions of every rule and merging common sub-trees together.
For example, consider this expression/tree:
.Example: Expressions 2
----
P.Gt(1000, P.Length(P.Var('REQUEST_URI')))
+ (IsGet / IsPost)
-- (and
-- (gt 1000 (length (var 'REQUEST_URI')))
-- (or
-- (eq 'GET' (var 'REQUEST_METHOD'))
-- (eq 'POST' (var 'REQUEST_METHOD'))
-- )
-- )
----
.Expression 2 as Tree
image::images/guide_2.png[Expression 2 as Tree]
We can add both of these expressions to the DAG, merging common sub-trees, to end up with:
.Expression 1 and 2 as DAG
image::images/guide_3.png[Expression 1 and 2 as DAG]
Merging common sub-expressions enables cross-expression optimization and result sharing.
One DAG per Context
^^^^^^^^^^^^^^^^^^^
Every configuration context has its own DAG. Each context also inherits any rules and associated predicate expressions from its parent context. Having per-context DAGs allows for differing policy to simplify each DAG in different ways.
DAG Lifecycle
^^^^^^^^^^^^^
A DAG goes through a sequence of changes once all expression trees are known.
1. All expression trees are combined to create the initial DAG, merging any common sub-trees.
2. A validation pass is performed, in which every node does a number of sanity checks.
3. A transformation pass is performed, in which every node is allowed to manipulate the DAG. For example, `(not (true))` will transform into a falsy value.
4. Repeat step 3 until the DAG doesn't change, i.e., there is nothing more to transform.
5. A final validation pass is performed.
After this process completes, the DAG is fixed. It will never again change in structure and can be used for evaluation.
DAG Evaluation
^^^^^^^^^^^^^^
DAG Evaluation is the process by which the values of nodes in the DAG are determined. When a node associated with a rule becomes truthy, that rule is fired. A DAG is evaluated on a per-transaction basis.
==== Values
We have made it this far without actually worrying about what the value returned by a function is. As an example of how values can be complex, consider the following expressions:
.Example: Expression
----
P.Var('ARGS'):sub('a'):length():gt(5)
-- (gt 5 (length (sub 'a' (var 'ARGS'))))
----
And consider the expression in the context of the following request:
.Example: Request
----
GET /example?a=123&a=123456
----
Here there are two parameters (members of `ARGS`) named `a`, one of which is longer than 5 bytes and one of which is not. How do we interpret the expression in this situation?
In a boolean sense, the expression is truthy and can accurately be interpreted as:
[quote]
Does any member of `ARGS` named `a` have length greater than 5.
As we will see, the actual value of the expression is:
.Example: Value
----
[a:'123456']
----
The result of any expression, including any literal, is called a 'Value'. A Value is a name, a type, and a value. Names are always strings. At present, the possible types with their values are:
String::
A sequence of bytes, possibly including NULs.
Number::
A signed integer.
Float::
A signed floating point.
List::
A list of Values.
In addition, there is a not-a-value Value called 'null' and written `:` (The null Value has no name or value). In Lua, it is available as `P.Null`.
In Predicate, null and any empty list are falsy. All other Values are truthy.
There is a subset of the sexpression grammar to describe values. Lists are enclosed in brackets, and names, when present, are specified via `name:value`. Here are some examples:
.Example: Literals
----
1.23
'Hello World'
['x' 'y' 'z']
named_list:[a:1 b:2 c:3]
----
There are a few more complications. Consider the expression:
.Example: Finished and Unfinished
----
P.Not(P.FOperator('rx', 'foo', P.Var('ARGS')))
-- (not (foperator 'rx' 'foo' (var 'ARGS')))
----
Meaning
[quote]
There is no argument with value containing `foo`.
The `ARGS` collection begins each transaction empty, potentially grows after the request URI is parsed, and potentially grows again once the body is parsed. Imagine we have seen the URI but not the body. If an argument containing `foo` appears in the URI, then this expression must be falsy, but if it does not, we cannot yet say whether it is truthy or falsy. Instead, we must wait for the request body to be parsed.
To accommodate `foo` appearing only in the body, Predicate allows list Values to grow. The result of `P.Var('ARGS')` begins as an empty list and may grow later. List Values are only allowed to grow, they may never shrink or change earlier elements. A consequence of this is that expressions may change from falsy to truthy but never from truthy to falsy. This allows Predicate to begin this expression as falsy and change it to truthy after the request body.
But if `foo` appears in the URI, we want to know that the expression is falsy immediately, if for no other reason than to not spend time evaluating it later. To accommodate this, every node has a notion of finished or not. Once a node is finished, it may not modify its list Value.
With this in hand, we can now describe how the expression works:
- `P.Var('ARGS')` begins empty and unfinished. After the request URI is parsed, it may add any arguments in the request URI but stays unfinished. After the request body is parsed, it may add any arguments in the request body and becomes finished, knowing that no more arguments can appear.
- `P.FOperator('rx', 'foo', ...)` begins by checking its last argument. As that argument is an empty list, `P.FOperator()`'s Value is an empty list. As that argument is unfinished, `P.FOperator()` is unfinished. When values are added to its last argument, it checks the new values and adds any that contain `foo` to its Value. Only when its last argument becomes finished does it also become finished.
- `P.Not(...)` begins by checking its argument. As its argument is falsy and unfinished, `P.Not()` must be falsy and unfinished. It must be falsy because its argument may become truthy in the future: if `P.Not()` started truthy, it would have to change to falsy at that point, but functions are not allowed to change from truthy to falsy. `P.Not()` must remain falsy until it knows its result will not change, either when its argument becomes truthy (in which case, `P.Not()` knows it will be falsy and can be finished) or when its argument becomes finished. In the example, if an argument containing `foo` appears in the request URI, then the first argument becomes truthy and `P.Not()` can become finished and falsy. If an argument containing `foo` never appears, then `P.Not()` can only become truthy and finished after its argument becomes finished while still falsy; which happens after the request body.
These details can become complicated. It works out that `P.Not()` (and its related functions such as `P.Nand()`) are the main case where these details matter. In most other cases, it suffices to understand that if there are multiple values, a Predicate expression is truthy if it is ``true'' for any of the values. See <<s.functions,Functions>> for additional discussion.
[[s.templates]]
=== Templates
Templates are a feature for doing simple substitutions in the backend. They are similar to simple Lua functions, but doing the substitutions in the backend has several advantages, including:
1. Reduces initial sexpression length and complexity. In large rule sets, this can have noticeable performance implications. In all cases, it can simplify the pre-transformation DAG making it easier to understand.
2. Produces better error messages by allowing them to refer to the template name.
Consider the Lua functions from <<s.composition,Composition>>.
.Example: Functions from Composition
----
local function RequestHeader(which)
return P.Sub(which, P.Var('REQUEST_HEADERS'))
end
local function LongHeader(which)
return RequestHeader(which):length():gt(1000)
end
----
These simply replace part of an expression with an argument (`which`). That sort of direct substitution can be expressed via templates:
.Example: Templates
----
PUtil.Define('RequestHeader', ['which'],
P.Sub(P.Ref('which'), P.Var('REQUEST_HEADERS'))
)
-- (sub (ref 'which') (var 'REQUEST_HEADERS'))
PUtil.Define('LongHeader', ['which'],
P.RequestHeader(P.Ref('which')):length():gt(1000)
)
-- (gt 1000 (length (RequestHeader (ref 'which'))))
P.LongHeader('HOST')
-- (LongHeader 'HOST')
----
The main limitation of templates is that they can only do simple substitutions. Here is an example of a Lua function that has no easy template equivalent:
.Example: EtcFile
----
local function EtcFile(filename)
return P.Rx('^/etc/' .. filename .. '$', P.Var('REQUEST_URI'))
end
----
`EtcFile` constructs a regexp string from an argument; a task easily done in Lua but difficult in Predicate. `EtcFile` is best implemented as a Lua function, not as a template.
See link:reference.html[reference] and link:template.html[template] for additional discussion.
[[s.functions]]
=== Functions
This section provides an overview of the Predicate standard library. For a complete description, see link:reference.html[reference]. Also remember that any IronBee transformation or operator can be used in Predicate.
There are a few common concepts that tie Predicate functions together and provide for a consistent interface. The most important of these concepts are 'Primary', 'Map', and 'Filter'.
Primary functions take a single ``primary'' argument as input and use any other arguments as ``configuration''. For example, `P.Operator(op, parameter, input)` treats `input` as the primary argument and `op` and `parameter` as configuration: they inform how to process the primary argument. In all cases, the primary argument is last. This final position interacts well with the object method syntax, e.g.,
.Example: Object Method Syntax and Primary Arguments
----
P.Var('ARGS'):operator('rx', '(\w+)=(\w+)')
-- (operator 'rx' '(\w+)=(\w+)' (var 'ARGS'))
----
Primary functions are null and unfinished until all their secondary arguments are finished (secondary arguments are often but not always literals).
Map functions are Primary functions that apply a sub-function to every sub-value of their primary argument. The result of a Map function is the values of the sub-function. If the primary argument is not a list, then they apply the sub-function to the primary argument. For example:
.Example: Map Functions
----
P.Neg(2)
-- (neg 2)
-- Result: -2
P.Neg({1, 2, 3})
-- (neg [1 2 3])
-- Result: [-1 -2 -3]
----
Filter functions are Primary functions that apply a sub-function to every sub-value. The result of a Filter function is the inputs for which the sub-function is truthy. If the primary argument is not a list, then a Filter function returns the primary argument if the sub-function is truthy for it and null otherwise. For example:
.Example: Filter Functions
----
P.Eq(2, 2)
-- (eq 2 2)
-- Result: 2
P.Eq(2, 3)
-- (eq 2 3)
-- Result: :
P.Eq(2, {1, 2, 3, 2})
-- (eq 2 [1 2 3 2])
-- Result: [2 2]
----
See link:reference.html[reference] for additional concepts and discussion.
The standard library is divided into several sub-libraries. These are each briefly described below and are completely described in link:reference.html[reference].
==== Boolean
Predicate directly provides three basic boolean connectives: `and`, `or`, and `not`. The frontend adds several others implemented in terms of them: `xor`, `nxor`, `nand`, and `nor`. E.g.,
.Example: `P.Xor()`
----
P.Xor(a, b)
-- (or (and a (not b)) (and (not a) b))
----
The frontend also provides a variety of shortcuts:
- `a + b` is equivalent to `P.And(a, b)`.
- `a / b` is equivalent to `P.Or(a, b)`.
- `-a` is equivalent to `P.Not(a)`.
- `a - b` is equivalent to `a + (-b)`
- `P.Xor(a, b)` is equivalent to `(a - b) + (b - a)`.
- `a ^ b` is equivalent to `P.Xor(a, b)`.
- `P.Nand(a, b)` is equivalent to `-(a + b)`.
- `P.Nor(a, b)` is equivalent to `-(a / b)`.
- `P.Nxor(a, b)` is equivalent to `-(a ^ b)`.
Finally, there are canonical constants for providing true and false values:
.Example: `P.True and P.False`
----
P.True
-- (true)
P.False
-- (false)
----
The expressions `(true)` and `(false)` produce canonical truthy and falsy values, respectively. These are: `[:'']` for true, and `:` for false.
Finally, there is an if statement: `P.If(p, t, f)`, which takes the value of `t` if `p` is truthy and `f` if `p` is falsy.
==== List
Predicate provides a variety of functions for manipulating lists, including: manipulating names of elements, concatenation, construction, selecting specific elements, flattening lists of lists, and more.
==== String
Predicate provides a regexp based string replacement function and a length function.
==== Filters
Predicate provides filters for all the user operations: equality, less than, etc. It also provides filters for selecting by name.
==== Predicates
Predicates test arguments. There are predicates for length, being finished, being a literal, and being a list.
==== Math
Predicate provides the usual arithmetic operations along with min and max.
==== Phase
Predicate provides functions for carefully controlling how expressions interact with the current phase of evaluation. These are rarely needed.
==== IronBee
Predicate provides functions to access operators, transformations, and vars. If the `constant` module is being used, a function for accessing constants is also available.
==== Development
Predicates provides functions for testing and expression development. The most important for a rule writer is `P.P()`.
`P.P()` takes one or more arguments. Its result is always that of its final argument. When evaluated, it outputs the value of all arguments to standard error. This allows it to be used like a print statement inside an expression, e.g.,
.Example: `P.P()`
----
P.P('Top Result = ', P.And(
P.Gt(1000, P.Length(P.Var('REQUEST_URI'))),
P.Eq('GET', P.P('REQUEST_METHOD = ', P.Var('REQUEST_METHOD')))
)
-- (p 'Top Result = ' (and
-- (gt 1000 (length (var 'REQUEST_URI')))
-- (eq 'GET' (p 'REQUEST_METHOD = (var 'REQUEST_METHOD')))
-- ))
----
When this expression is evaluated, the result of the expression as the whole and of `P.Var('REQUEST_METHOD')` will be written to standard error.
Be aware that `P.P()` only outputs when actually evaluated. It may not be evaluated for various reasons including: a higher level boolean determined that it need not be; it was evaluated earlier and finished.
==== Templates
Predicate provides the `P.Ref()` function for use in templates. See <<s.templates,Templates>>.
=== Specific Advice
This section contains specific topics that have come up frequently.
==== Phaseless Rules
Predicate rules do not need to be tied to a specific phase. If a phase for them is specified, they are evaluated only in that phase and executed if they are truthy in that phase. If no phase is specified, they are evaluated
appropriately and executed at the earliest phase they are truthy in.
[[s.operator_and_foperator]]
==== Operator and FOperator
IronBee operators take an input and produce two outputs:
1. A true or false value.
2. Optionally, a ``capture collection''. A capture collection is always either null or a list value. Examples include the captures from a regular expression match.
Predicate provides two functions to invoke operators, `P.Operator()` and `P.FOperator()`. They both act like filters in that they only produce results for inputs for which the operator returns true. They differ in the results they produce: `P.Operator()` produces the capture collections while `P.FOperator()` produces the passing inputs.
As a rule of thumb: If you don't care about the capture collection, use `P.FOperator()`.
As with any map-like or filter function, both functions behave differently when their input is not a list Value. In that case, if the operator returns false, both functions produce null. If the operator returns true, `P.Operator()` returns the capture collection and `P.FOperator()` returns the input.
There is a rare edge case: if an input is null, the output of `P.FOperator()` is always null and the output of `P.Operator()` is likely always falsy (either `[]` or null). In such a situation, it can be difficult to determine whether the operator returned true or false. In the future, another operator function may be introduced which outputs true or false depending on what the operator returns. Until then, if this situation matters to you, you must either explicitly test the input for nullness or use `P.Operator()` and explicitly check if the result is a (empty) list or null.
==== Short-Circuited Boolean Functions
The logical ``or'' and ``and'' functions come in short-circuited and non-short-circuited flavors. The short-circuited flavors are `P.OrSC()` and `P.AndSC()` and the non-short-circuited flavors are `P.And()` and `P.Or()`.
It may be tempting to always use the short-circuited flavors based on experience with other programming languages, but this temptation should be resisted. The non-short-circuited flavors have a significant advantage in that they do not care about the order of the arguments. For example, the following two expressions are equivalent, will merge in the DAG, and only be evaluated once:
.Example: `P.Or()`
----
P.Or(x, y)
P.Or(y, x)
----
As such, the non-short-circuited versions should be preferred except in cases when you know that evaluating a certain argument will be much more expensive than the others. In such cases, consider using `P.If()` instead if it makes such dependence clearer, e.g.,
.Example: Short-Circuiting
----
-- Worst.
P.And(should_do_expensive_check, expensive_check)
-- Bad.
P.AndSC(should_do_expensive_check, expensive_check)
-- Better.
P.If(should_do_expensive_check, expensive_check)
----
Finally, note that if `should_do_expensive_check` is known at configuration time, all of these will transform appropriately. The only case where short-circuiting matters is when `should_do_expensive_check` is only known at run time and `expensive_check` is expensive relative to `should_do_expensive_check`.
==== The PushName-Flatten Idiom
Consider applying a regular expression to a list of inputs:
.Example: Rx Captures...
----
P.Operator('rx', '\w{3}', [a:'123foo' b:' bar-'])
-- (operator 'rx' '\w{3}' [a:'123foo' b:' bar-'])
-- Result: [a:[0:'foo'] b:[0:'bar']]
----
You know the capture collections will be a single element and you'd rather interact with those elements than the entire collection. You could flatten:
.Example: ... with `P.Flatten()` ...
----
P.Operator('rx', '\w{3}', [a:'123foo' b:' bar-']):flatten()
-- (flatten (operator 'rx' '\w{3}' [a:'123foo' b:' bar-']))
-- Result: [0:'foo' 0:'bar']
----
This result has the values you want but has lost the names. If you care about the names, you want to push them down first:
.Example: ... And with `P.PushName()`
----
P.Operator('rx', '\w{3}', [a:'123foo' b:' bar-']):pushName():flatten()
-- (flatten (pushName (operator 'rx' '\w{3}' [a:'123foo' b:' bar-'])))
-- Result: [a:'foo' b:'bar']
----
This combination of `P.PushName()` and `P.Flatten()` occurs regularly and is the PushName-Flatten idiom.
==== Interaction with actions and `set_predicate_vars`
A Predicate rule will fire if its expression is truthy. If that expression is a list Value, it will fire once for every Value in the list. This behavior matches the traditional IronBee rule system and allows for per-Value actions.
For per-value actions to be meaningful, they need to have access to each Value in turn. This is accomplished via two vars: `PREDICATE_VALUE` and `PREDICATE_VALUE_NAME` which hold the value and name of each Value in turn. For performance reasons, you must explicitly request that these vars be set by adding the `set_predicate_vars` action to your rule. The vars will then be available for all 'subsequent' actions.
=== Tools
==== PredicateTrace
PredicateTrace is a feature of the IronBee Predicate Rules module. When turned on, it outputs the DAG 'with the value of each node' at the end of every phase. It can be further limited to only show the portions of the DAG that correspond to specific rules.
To use PredicateTrace add the `PredicateTrace` directive to your configuration file, specifying the trace file and rule ids to trace. Run IronBee (e.g., with clipp) and then run `predicate/render_ptrace.rb` on the resulting trace file. The output will be an HTML file.
See link:ptrace.pdf[] for details.
==== PP
PP is a program (`predicate/pp.rb`) that can be run on a Waggle file containing Predicate rules. It will extract all Predicate expressions from those rules, validate them, and produce an annotated HTML report that includes the sexprs, issues, and graphs.
See link:pp.pdf[].
[[s.pp_dot]]
==== PP Dot
PP Dot is a program (`predicate/pp_dot`) which PP uses to generate all its images. It can also be used directly. It takes sexpressions (possibly with labels) on standard in, one per line, and draws them according to the mode. Current modes include:
Tree::
Draw each sexpression as a tree. Does no sub-tree merging, transformation, or validation. Does not support labels or template definitions.
Expr::
Draw each sexpression as a graph. Does sub-tree merging, transformation, and validation on a per-expression basis but not between expressions. Does support template definitions. Does not support labels.
Graph::
Combine all sexpressions into a graph. Does sub-tree merging, transformation, and validation on the entire graph. Does support labels and template definitions.
If labels are supported they can be attached to sexpression by placing them before the sexpression on the line followed by a space.
Templates may be defined via a ``Define'' line, e.g.:
.Example: Define
----
Define LongHeader which,length (gt (ref 'length') (sub (ref 'which') (var 'REQUEST_HEADERS')))
----
All drawings are done via http://www.graphviz.org[GraphViz] dot format.
As an example, for the input:
.Example: PP Dot
----
Define LongHeader which,length (gt (ref 'length') (sub (ref 'which') (var 'REQUEST_HEADERS')))
root1 (LongHeader 'Host' 1000)
root2 (and (LongHeader 'Content-Length' 10) (eq 'GET' (var 'REQUEST_METHOD')))
----
The following two graphs are produced:
.Pre-Transformation Graph
image::images/guide_4.png[Pre-Transformation Graph]
.Post-Transformation Graph
image::images/guide_5.png[Post-Transformation Graph]
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<title>Raphael.Export Tests</title>
<link rel="stylesheet" href="qunit.css" type="text/css"/>
<style type="text/css">
#visual-tests {
font-family: Arial;
margin: 16px 16px;
}
.paper, .exported-paper {
border: 1px solid #ccc;
display: block;
float: left;
height: 240px;
margin-top: 16px;
margin-right: 8px;
width: 320px;
}
</style>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="qunit.js"></script>
<script type="text/javascript" src="raphael.js"></script>
<script type="text/javascript" src="tiger.js"></script>
<script type="text/javascript" src="../raphael.export.js"></script>
<script type="text/javascript">
// QUnit suite for Raphael.Export: draws a variety of shapes on a Raphael
// paper, serializes it with toSVG(), re-inserts the markup for visual
// comparison, and asserts on the serialized output.
$(function() {
    module('toSVG()');
    test('is defined', function() {
        equal(typeof Raphael.fn.toSVG, 'function');
    });
    var paper = Raphael('raphael-paper');
    // Transformed rectangle and plain circle.
    paper.rect(50, 40, 50, 50)
        .rotate(10)
        .scale(1.2)
        .translate(10, 10)
    ;
    paper.circle(50, 40, 20);
    // A set exercising gradients, dashes, opacity and glow.
    paper.set(
        paper.rect(50, 40, 50, 50)
            .attr({ fill: '135-#f00-#ff0', transform: 'T120,120', 'stroke-dasharray': '- ', 'stroke-width': 3 })
        ,
        paper.circle(50, 40, 20)
            .attr({ fill: '#f00', transform: 'T60,150', opacity: .5 })
            .glow({ width: 50, color: '#F00' })
    );
    // Paths with translate-only and compound transforms.
    paper.path('M10,10L10,20L20,20L20,30')
        .transform('T200,0')
    ;
    paper.path('M10,10L10,20L20,20L20,30')
        .rotate(-25)
        .scale(1.75)
        .translate(10, 5)
    ;
    // Text with each anchor mode and varying font sizes.
    paper.text(120, 30, 'Hello World')
        .attr({
            'text-anchor': 'start'
        });
    paper.text(300, 80, 'Medium Font,\nRight Anchored')
        .attr({
            'text-anchor': 'end',
            'font-size': 16
        });
    paper.text(140, 120, 'Bigger Font, Middle Anchored')
        .attr({
            'text-anchor': 'middle',
            'font-size': 20,
            'transform': 'T40,20S1.2R10'
        });
    // Rounded rect
    paper.rect(10, 160, 40, 40, 5);
    var paperTiger = new Raphael('tiger');
    paperTiger.add(tiger);
    var svg = paper.toSVG();
    var tigerSVG = paperTiger.toSVG();
    if ( Raphael.svg === true ) {
        // Currently doesn't work on Firefox 3.6
        document.getElementById('raphael-export-paper').innerHTML = svg;
        document.getElementById('tiger-exported').innerHTML = tigerSVG;
    } else {
        $('#raphael-export-paper')
            .html('<p>Not an SVG Browser but here\'s the SVG exported.</p>')
            .text(svg)
        ;
        // BUG FIX: previously inserted `svg` (the main paper) here, so the
        // tiger panel showed the wrong export on non-SVG browsers.
        $('#tiger-exported').text(tigerSVG);
    }
    test('does not duplicate tag attributes', function() {
        ok(svg.indexOf(' x="50" x="50"') === -1, 'Duplicate x="50" found');
    });
    test('escapes quotes', function() {
        ok(svg.indexOf('"Arial"') > -1, '""Arial"" not found');
    });
    test('serializes text tag', function() {
        equal($(svg).find('tspan:first').text(), 'Hello World', 'text present');
        ok($(svg).find('tspan[dy]').length > 0, 'tspan dy attribute present');
        // BUG FIX: selector previously contained a stray trailing double
        // quote ('tspan[dy]:first"'), which is a Sizzle syntax error.
        ok($(svg).find('tspan[dy]:first').attr('dy') !== '0', 'tspan dy attribute is not "0"');
        ok($($(svg).find('text')[1]).attr('font-size') === '16px', 'tspan font-size attribute is "16px"');
    });
    test('serializes path tag', function() {
        ok($(svg).find('path[d]').length > 0, 'path d attribute present');
    });
});
</script>
</head>
<body>
<h1 id="qunit-header">Raphael.Export Tests</h1>
<h2 id="qunit-banner"></h2>
<div id="qunit-testrunner-toolbar"></div>
<h2 id="qunit-userAgent"></h2>
<ol id="qunit-tests"></ol>
<div id="qunit-fixture"></div>
<div id="visual-tests">
<h2>Visual Tests for SVG Browsers</h2>
<p>Left = Raphael, Right = Raphael.Export (via DOM insert)</p>
<div id="raphael-paper" class="paper"></div>
<div id="raphael-export-paper" class="exported-paper"></div>
<br style="clear: both;" />
<div id="tiger" class="paper"></div>
<div id="tiger-exported" class="exported-paper"></div>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <functional>
// REQUIRES: c++98 || c++03 || c++11 || c++14
// mem_fun_ref_t
#include <functional>
#include <type_traits>
#include <cassert>
// Test fixture: member functions covering the four shapes the deprecated
// mem_fun_ref adaptors must handle — non-const/const, with/without an
// argument.
struct A
{
char a1() {return 5;}
short a2(int i) {return short(i+1);}
int a3() const {return 1;}
double a4(unsigned i) const {return i-1;}
};
// Verify std::mem_fun_ref_t (deprecated in C++11, removed in C++17; hence
// the REQUIRES line restricting this test to c++98..c++14):
// - it derives from std::unary_function<A, char>, and
// - invoking it forwards to the wrapped member function on a reference.
int main(int, char**)
{
typedef std::mem_fun_ref_t<char, A> F;
// The adaptor must publish argument_type/result_type via unary_function.
static_assert((std::is_base_of<std::unary_function<A, char>, F>::value), "");
const F f(&A::a1);
A a;
// f(a) forwards to a.a1(), which returns 5.
assert(f(a) == 5);
return 0;
}
| {
"pile_set_name": "Github"
} |
using System;
using UnityEngine;
namespace Parabox.Stl
{
/// <summary>
/// Value type mirroring UnityEngine.Vector3 that compares components with
/// Mathf.Approximately, so nearly-identical vectors (e.g. STL mesh vertex
/// positions) compare equal despite float round-off.
/// </summary>
struct StlVector3 : IEquatable<StlVector3>
{
// Scale applied to components when hashing (roughly 1/10000 precision).
const float k_Resolution = 10000f;
public float x;
public float y;
public float z;
// Copy the components of a UnityEngine vector.
public StlVector3(Vector3 v)
{
x = v.x;
y = v.y;
z = v.z;
}
public StlVector3(float x, float y, float z)
{
this.x = x;
this.y = y;
this.z = z;
}
// Explicit conversions to and from UnityEngine.Vector3.
public static explicit operator Vector3(StlVector3 vec)
{
return new Vector3(vec.x, vec.y, vec.z);
}
public static explicit operator StlVector3(Vector3 vec)
{
return new StlVector3(vec);
}
// Approximate equality: true when every component pair is within
// Mathf.Approximately's epsilon.
public bool Equals(StlVector3 other)
{
return Mathf.Approximately(x, other.x)
&& Mathf.Approximately(y, other.y)
&& Mathf.Approximately(z, other.z);
}
public override bool Equals(object obj)
{
if (obj == null || !(obj is StlVector3))
return false;
return Equals((StlVector3) obj);
}
// NOTE(review): Equals() uses Mathf.Approximately while this hashes the
// raw scaled floats, so two values that compare equal can still produce
// different hash codes — a hazard if instances are used as keys in hashed
// containers. Confirm callers rely only on Equals, or round the scaled
// components before hashing.
public override int GetHashCode()
{
// https://stackoverflow.com/questions/720177/default-implementation-for-object-gethashcode/720282#720282
unchecked
{
int hash = 27;
hash = (13 * hash) + (x * k_Resolution).GetHashCode();
hash = (13 * hash) + (y * k_Resolution).GetHashCode();
hash = (13 * hash) + (z * k_Resolution).GetHashCode();
return hash;
}
}
public static bool operator == (StlVector3 a, StlVector3 b)
{
return a.Equals(b);
}
public static bool operator != (StlVector3 a, StlVector3 b)
{
return ! a.Equals(b);
}
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2015 EDF SA
*
* This file is part of slurm-web.
*
* slurm-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* slurm-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with slurm-web. If not, see <http://www.gnu.org/licenses/>.
*
*/
var isIE = Boolean(document.documentMode);
// RequireJS configuration: path aliases for third-party libraries (served
// from /javascript) and for slurm-web's own utils/helpers/draw/core/module
// scripts, plus shims for the libraries that are not AMD modules.
require.config({
paths: {
text: '/javascript/requirejs/text.min',
jquery: '/javascript/jquery/jquery.min',
'jquery-tablesorter': '/javascript/jquery-tablesorter/jquery.tablesorter.min',
'jquery-flot': '/javascript/jquery-flot/jquery.flot.min',
'jquery-flot-pie': '/javascript/jquery-flot/jquery.flot.pie.min',
xdomain: '../../js/libraries/xdomain.min',
async: '/javascript/async/async.min',
handlebars: '/javascript/handlebars/handlebars',
bootstrap: '/javascript/bootstrap/js/bootstrap',
'bootstrap-typeahead': '/javascript/bootstrap/js/typeahead.jquery',
'bootstrap-tagsinput': '/javascript/bootstrap/js/bootstrap-tagsinput.min',
d3: '/javascript/d3/d3.min',
'token-utils': '../../js/utils/token',
'user-utils': '../../js/utils/user',
'date-utils': '../../js/utils/date',
'tablesorter-utils': '../../js/utils/tablesorter',
'jobs-utils': '../../js/utils/jobs',
'page-utils': '../../js/utils/page',
'ajax-utils': '../../js/utils/ajax',
'error-utils': '../../js/utils/error',
'flot-utils': '../../js/utils/flot',
'tagsinput-utils': '../../js/utils/tagsinput',
'string-helpers': '../../js/helpers/string',
'jobs-helpers': '../../js/helpers/jobs',
'number-helpers': '../../js/helpers/number',
'boolean-helpers': '../../js/helpers/boolean',
'date-helpers': '../../js/helpers/date',
'different-helpers': '../../js/helpers/different',
'keycode-helpers': '../../js/helpers/keycode',
'view-helpers': '../../js/helpers/view',
'2d-draw': '../../js/draw/2d-draw',
'colors-draw': '../../js/draw/colors',
'2d-intersections-draw': '../../js/draw/2d-intersections',
'2d-legend-draw': '../../js/draw/2d-legend',
'factor-draw': '../../js/draw/factor',
login: '../../js/core/login/login',
navbar: '../../js/core/navbar/navbar',
clusters: '../../js/core/clusters/clusters',
jobs: '../../js/modules/jobs/jobs',
racks: '../../js/modules/racks/racks',
'jobs-map': '../../js/modules/jobs-map/jobs-map',
partitions: '../../js/modules/partitions/partitions',
reservations: '../../js/modules/reservations/reservations',
qos: '../../js/modules/qos/qos',
'3d-view': '../../js/modules/3d-view/3d-view',
gantt: '../../js/modules/gantt/gantt',
topology: '../../js/modules/topology/topology',
'topology-utils': '../../js/utils/topology',
'fake-placeholder': '../../js/utils/fakePlaceholder'
},
// Shims declare globals/dependencies for non-AMD scripts.
shim: {
jquery: {
exports: '$'
},
'jquery-tablesorter': {
deps: [ 'jquery' ]
},
'jquery-flot': {
deps: [ 'jquery' ]
},
'jquery-flot-pie': {
deps: [ 'jquery', 'jquery-flot' ]
},
handlebars: {
exports: 'Handlebars'
},
bootstrap: {
deps: [ 'jquery' ]
},
'bootstrap-typeahead': {
deps: [ 'jquery' ],
// typeahead registers itself under 'typeahead.js'; re-run its factory
// with our jQuery so the plugin attaches to the right instance.
init: function($) {
return require.s.contexts._.registry['typeahead.js'].factory($);
}
},
'bootstrap-tagsinput': {
deps: [ 'jquery', 'bootstrap', 'bootstrap-typeahead' ]
}
}
});
// On Internet Explorer, load the xdomain shim and register every cluster's
// API endpoint as a CORS slave, proxied through "<api.path>/proxy".
if (isIE) {
require([ 'xdomain' ], function(xdomain) { // eslint-disable-line global-require
var proxies = {};
var key;
for (key in window.clusters) {
var api = window.clusters[key].api;
proxies[api.url] = api.path + '/proxy';
}
xdomain.slaves(proxies);
});
}
// Application bootstrap: once all AMD modules are loaded, parse the global
// configuration, initialise the cluster selector, wire up the document-level
// lifecycle events (loadPage / logout / show), and route each view name to
// its page module.
require([
'jquery',
'page-utils',
'text!/slurm-web-conf/config.json',
'token-utils',
'user-utils',
'login',
'navbar',
'clusters',
'jobs',
'racks',
'jobs-map',
'qos',
'partitions',
'reservations',
'3d-view',
'gantt',
'topology',
'ajax-utils'
], function($, Page, config, token, user, Login, Navbar, Clusters, Jobs, Racks, JobsMap, QOS, Partitions, Reservations, D3View, Gantt, Topology, ajaxUtils) {
var clusters = null,
page = new Page(),
navbar = null;
// config arrives as raw JSON text via the RequireJS text! plugin.
config = JSON.parse(config);
clusters = new Clusters(config);
clusters.init();
// NOTE(review): extending String.prototype is global and can collide with
// other scripts; only used below to title-case the cluster name.
String.prototype.capitalizeFirstLetter = function() {
return this.charAt(0).toUpperCase() + this.slice(1);
}
//define loader
var $loader = $('#loader').hide();
//show loader when page is loading
$(document).ajaxStart(function(){
$loader.show();
}) //hide when stop loading
.ajaxStop(function(){
$loader.hide();
});
// Fired when a cluster is selected: rebuild the navbar for that cluster,
// retitle the document, then show the configured start page.
$(document).on('loadPage', function(e, options) {
e.stopPropagation();
$(document).trigger('destroyNavbar');
navbar = new Navbar(options.config);
navbar.init();
$('title').html(options.config.cluster.name.capitalizeFirstLetter() + '\'s HPC Dashboard');
$(document).trigger('show', { page: config.STARTPAGE });
});
// Clear stored credentials — for the current cluster only, or for every
// cluster when AUTOLOGIN is enabled (clusters then share authentication).
$(document).on('logout', function(e, onlyCurrentCluster) {
var cluster;
e.preventDefault();
// Drop the token and user info stored for one cluster.
function logout(cluster) {
token.removeToken(cluster);
user.removeUser(cluster);
}
if (!config.AUTOLOGIN || onlyCurrentCluster) {
// clear authentication on current cluster
logout(config.cluster);
} else {
// clear authentication on all clusters
for (cluster in window.clusters) {
logout(window.clusters[cluster]);
}
}
$(document).trigger('show', { page: config.cluster.authentication.enabled ? 'login' : config.STARTPAGE });
});
// Central router: tear down the current view, then instantiate the module
// matching options.page, falling back to the first view the user may see.
$(document).on('show', function(e, options) {
// check if the wanted page is accessible for the current user
var nextPage = options.page !== 'login' &&
navbar.availableViews.filter(function(view) {
return view.id === options.page;
}).length === 0 ? navbar.availableViews[0].id : options.page;
e.stopPropagation();
page.destroy(true);
$('#main > div').not('#flash').remove();
page = new Page();
// '#flash.display' means a flash message was queued for the next page.
if ($('#flash.display').length) {
$('#flash').show().removeClass('display');
} else {
$('#flash').hide().find('.alert').empty();
}
switch (nextPage) {
case 'login':
page = new Login(config);
break;
case 'jobs':
if (options.filter) {
page = new Jobs(config, options.filter);
} else {
page = new Jobs(config, null);
}
break;
case 'jobsmap':
page = new JobsMap(config);
break;
case 'partitions':
page = new Partitions(config);
break;
case 'reservations':
page = new Reservations(config);
break;
case 'qos':
page = new QOS(config);
break;
case 'racks':
page = new Racks(config);
break;
case '3dview':
page = new D3View(config);
break;
case 'gantt':
page = new Gantt(config);
break;
case 'topology':
page = new Topology(config);
break;
}
if (page.hasOwnProperty('init')) {
page.init();
}
if (page.hasOwnProperty('refresh')) {
page.refresh();
}
// Optional auto-refresh checkbox: state persisted in a one-year cookie.
if (page.hasOwnProperty('stopRefresh') && config.REFRESHCHECKBOX) {
$("#refreshCheckbox").off("change"); // don't stack events
$("#refreshCheckbox").change(function() {
if (this.checked) {
page.refresh();
} else {
page.stopRefresh();
}
document.cookie = 'dashboardRefresh='+this.checked+'; max-age=31536000';
});
// Read the 'dashboardRefresh' cookie; anything but "false" enables refresh.
var cookieCheck = document.cookie.replace(/(?:(?:^|.*;\s*)dashboardRefresh\s*\=\s*([^;]*).*$)|^.*$/, "$1") !== "false";
$("#refreshCheckbox").prop('checked', cookieCheck);
if (!cookieCheck) {
page.stopRefresh();
}
$("#refreshCheckboxContainer").show();
} else {
$("#refreshCheckboxContainer").hide();
}
});
// Keep the content clear of the fixed navbar when the window resizes.
$(window).resize(function() {
$('body>.container-fluid').css({ 'margin-top': $('nav').height() + 'px' });
});
});
| {
"pile_set_name": "Github"
} |
/*
* Author....: Forest Belt, Computer Diagnostic Services, Inc.
* CIS ID....: ?
*
* This is an original work by Forest Belt and is placed in the
* public domain.
*
* Modification history:
* ---------------------
*
* Rev 1.2 15 Aug 1991 23:02:52 GLENN
* Forest Belt proofread/edited/cleaned up doc
*
* Rev 1.1 14 Jun 1991 19:51:00 GLENN
* Minor edit to file header
*
* Rev 1.0 01 Apr 1991 01:00:42 GLENN
* Nanforum Toolkit
*
*/
// Set bit <nBitpos> in the byte given as the one-character string
// <cInbyte>, returning the result as a one-character string.
// Returns NIL when either argument has the wrong type.
// (Bit-position numbering is whatever hb_bitSet() uses — TODO confirm
// it is 0-based against the Harbour runtime docs.)
FUNCTION ft_BitSet( cInbyte, nBitpos )
   // Validate argument types before converting; avoids a runtime error
   // on bad input and preserves the documented NIL-on-error contract.
   IF HB_ISSTRING( cInbyte ) .AND. HB_ISNUMERIC( nBitpos )
      // string byte -> numeric (hb_BCode), set the bit, numeric -> string byte.
      RETURN hb_BChar( hb_bitSet( hb_BCode( cInbyte ), nBitpos ) )
   ENDIF
   RETURN NIL
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Weavers xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="FodyWeavers.xsd">
<InlineIL SequencePoints="Debug" />
</Weavers> | {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Mon Nov 14 12:37:06 UTC 2016 -->
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>ResourceHandler (Gradle API 3.2)</title>
<meta name="date" content="2016-11-14">
<link rel="stylesheet" type="text/css" href="../../../../javadoc.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="ResourceHandler (Gradle API 3.2)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/gradle/api/resources/ResourceException.html" title="class in org.gradle.api.resources"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../org/gradle/api/resources/TextResource.html" title="interface in org.gradle.api.resources"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/gradle/api/resources/ResourceHandler.html" target="_top">Frames</a></li>
<li><a href="ResourceHandler.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.gradle.api.resources</div>
<h2 title="Interface ResourceHandler" class="title">Interface ResourceHandler</h2>
</div>
<div class="contentContainer">
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public interface <span class="strong">ResourceHandler</span></pre>
<div class="block">Provides access to resource-specific utility methods, for example factory methods that create various resources.</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../org/gradle/api/resources/ReadableResource.html" title="interface in org.gradle.api.resources">ReadableResource</a></code></td>
<td class="colLast"><code><strong><a href="../../../../org/gradle/api/resources/ResourceHandler.html#bzip2(java.lang.Object)">bzip2</a></strong>(<a href="https://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a> path)</code>
<div class="block">Creates resource that points to a bzip2 compressed file at the given path.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code><a href="../../../../org/gradle/api/resources/TextResourceFactory.html" title="interface in org.gradle.api.resources">TextResourceFactory</a></code></td>
<td class="colLast"><code><strong><a href="../../../../org/gradle/api/resources/ResourceHandler.html#getText()">getText</a></strong>()</code>
<div class="block">Returns a factory for creating <code>TextResource</code>s from various sources such as
strings, files, and archive entries.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../org/gradle/api/resources/ReadableResource.html" title="interface in org.gradle.api.resources">ReadableResource</a></code></td>
<td class="colLast"><code><strong><a href="../../../../org/gradle/api/resources/ResourceHandler.html#gzip(java.lang.Object)">gzip</a></strong>(<a href="https://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a> path)</code>
<div class="block">Creates resource that points to a gzip compressed file at the given path.</div>
</td>
</tr>
</table>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="gzip(java.lang.Object)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>gzip</h4>
<pre><a href="../../../../org/gradle/api/resources/ReadableResource.html" title="interface in org.gradle.api.resources">ReadableResource</a> gzip(<a href="https://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a> path)</pre>
<div class="block">Creates resource that points to a gzip compressed file at the given path.
The path is evaluated as per <a href="../../../../org/gradle/api/Project.html#file(java.lang.Object)"><code>Project.file(Object)</code></a>.</div>
<dl><dt><span class="strong">Parameters:</span></dt><dd><code>path</code> - The path evaluated as per <a href="../../../../org/gradle/api/Project.html#file(java.lang.Object)"><code>Project.file(Object)</code></a>.</dd></dl>
</li>
</ul>
<a name="bzip2(java.lang.Object)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>bzip2</h4>
<pre><a href="../../../../org/gradle/api/resources/ReadableResource.html" title="interface in org.gradle.api.resources">ReadableResource</a> bzip2(<a href="https://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a> path)</pre>
<div class="block">Creates resource that points to a bzip2 compressed file at the given path.
The path is evaluated as per <a href="../../../../org/gradle/api/Project.html#file(java.lang.Object)"><code>Project.file(Object)</code></a>.</div>
<dl><dt><span class="strong">Parameters:</span></dt><dd><code>path</code> - The path evaluated as per <a href="../../../../org/gradle/api/Project.html#file(java.lang.Object)"><code>Project.file(Object)</code></a>.</dd></dl>
</li>
</ul>
<a name="getText()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>getText</h4>
<pre><a href="../../../../org/gradle/api/Incubating.html" title="annotation in org.gradle.api">@Incubating</a>
<a href="../../../../org/gradle/api/resources/TextResourceFactory.html" title="interface in org.gradle.api.resources">TextResourceFactory</a> getText()</pre>
<div class="block">Returns a factory for creating <code>TextResource</code>s from various sources such as
strings, files, and archive entries.</div>
<dl><dt><span class="strong">Returns:</span></dt><dd>a factory for creating <code>TextResource</code>s</dd><dt><span class="strong">Since:</span></dt>
<dd>2.2</dd></dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/gradle/api/resources/ResourceException.html" title="class in org.gradle.api.resources"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../org/gradle/api/resources/TextResource.html" title="interface in org.gradle.api.resources"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/gradle/api/resources/ResourceHandler.html" target="_top">Frames</a></li>
<li><a href="ResourceHandler.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.