code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
3
942
language
stringclasses
30 values
license
stringclasses
15 values
size
int32
3
1.05M
// Package fastly (this file): client bindings for the Fastly "logentries"
// logging endpoint — list/create/get/update/delete per service version.
package fastly

import (
	"fmt"
	"sort"
	"time"
)

// Logentries represents a logentries response from the Fastly API.
type Logentries struct {
	ServiceID string `mapstructure:"service_id"`
	Version   int    `mapstructure:"version"`

	Name              string     `mapstructure:"name"`
	Port              uint       `mapstructure:"port"`
	UseTLS            bool       `mapstructure:"use_tls"`
	Token             string     `mapstructure:"token"`
	Format            string     `mapstructure:"format"`
	ResponseCondition string     `mapstructure:"response_condition"`
	CreatedAt         *time.Time `mapstructure:"created_at"`
	UpdatedAt         *time.Time `mapstructure:"updated_at"`
	DeletedAt         *time.Time `mapstructure:"deleted_at"`
}

// logentriesByName is a sortable list of logentries.
type logentriesByName []*Logentries

// Len, Swap, and Less implement the sortable interface.
func (s logentriesByName) Len() int      { return len(s) }
func (s logentriesByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s logentriesByName) Less(i, j int) bool {
	return s[i].Name < s[j].Name
}

// ListLogentriesInput is used as input to the ListLogentries function.
type ListLogentriesInput struct {
	// Service is the ID of the service (required).
	Service string

	// Version is the specific configuration version (required).
	Version int
}

// ListLogentries returns the list of logentries for the configuration version.
// The result is sorted by name (sort.Stable keeps the API's order for ties).
func (c *Client) ListLogentries(i *ListLogentriesInput) ([]*Logentries, error) {
	if i.Service == "" {
		return nil, ErrMissingService
	}

	if i.Version == 0 {
		return nil, ErrMissingVersion
	}

	path := fmt.Sprintf("/service/%s/version/%d/logging/logentries", i.Service, i.Version)
	resp, err := c.Get(path, nil)
	if err != nil {
		return nil, err
	}

	var ls []*Logentries
	if err := decodeJSON(&ls, resp.Body); err != nil {
		return nil, err
	}
	sort.Stable(logentriesByName(ls))
	return ls, nil
}

// CreateLogentriesInput is used as input to the CreateLogentries function.
type CreateLogentriesInput struct {
	// Service is the ID of the service. Version is the specific configuration
	// version. Both fields are required.
	Service string
	Version int

	Name string `form:"name,omitempty"`
	Port uint   `form:"port,omitempty"`
	// UseTLS is a pointer so an unset value is omitted from the form
	// (Compatibool is declared elsewhere in this package).
	UseTLS            *Compatibool `form:"use_tls,omitempty"`
	Token             string       `form:"token,omitempty"`
	Format            string       `form:"format,omitempty"`
	ResponseCondition string       `form:"response_condition,omitempty"`
}

// CreateLogentries creates a new Fastly logentries.
func (c *Client) CreateLogentries(i *CreateLogentriesInput) (*Logentries, error) {
	if i.Service == "" {
		return nil, ErrMissingService
	}

	if i.Version == 0 {
		return nil, ErrMissingVersion
	}

	path := fmt.Sprintf("/service/%s/version/%d/logging/logentries", i.Service, i.Version)
	resp, err := c.PostForm(path, i, nil)
	if err != nil {
		return nil, err
	}

	var l *Logentries
	if err := decodeJSON(&l, resp.Body); err != nil {
		return nil, err
	}
	return l, nil
}

// GetLogentriesInput is used as input to the GetLogentries function.
type GetLogentriesInput struct {
	// Service is the ID of the service. Version is the specific configuration
	// version. Both fields are required.
	Service string
	Version int

	// Name is the name of the logentries to fetch.
	Name string
}

// GetLogentries gets the logentries configuration with the given parameters.
func (c *Client) GetLogentries(i *GetLogentriesInput) (*Logentries, error) {
	if i.Service == "" {
		return nil, ErrMissingService
	}

	if i.Version == 0 {
		return nil, ErrMissingVersion
	}

	if i.Name == "" {
		return nil, ErrMissingName
	}

	// Name is interpolated into the URL path; callers supply it verbatim.
	path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name)
	resp, err := c.Get(path, nil)
	if err != nil {
		return nil, err
	}

	var l *Logentries
	if err := decodeJSON(&l, resp.Body); err != nil {
		return nil, err
	}
	return l, nil
}

// UpdateLogentriesInput is used as input to the UpdateLogentries function.
type UpdateLogentriesInput struct {
	// Service is the ID of the service. Version is the specific configuration
	// version. Both fields are required.
	Service string
	Version int

	// Name is the name of the logentries to update.
	Name string

	// NewName (form field "name") renames the logentries entry.
	NewName           string       `form:"name,omitempty"`
	Port              uint         `form:"port,omitempty"`
	UseTLS            *Compatibool `form:"use_tls,omitempty"`
	Token             string       `form:"token,omitempty"`
	Format            string       `form:"format,omitempty"`
	ResponseCondition string       `form:"response_condition,omitempty"`
}

// UpdateLogentries updates a specific logentries.
func (c *Client) UpdateLogentries(i *UpdateLogentriesInput) (*Logentries, error) {
	if i.Service == "" {
		return nil, ErrMissingService
	}

	if i.Version == 0 {
		return nil, ErrMissingVersion
	}

	if i.Name == "" {
		return nil, ErrMissingName
	}

	path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name)
	resp, err := c.PutForm(path, i, nil)
	if err != nil {
		return nil, err
	}

	var l *Logentries
	if err := decodeJSON(&l, resp.Body); err != nil {
		return nil, err
	}
	return l, nil
}

// DeleteLogentriesInput is the input parameter to DeleteLogentries.
type DeleteLogentriesInput struct {
	// Service is the ID of the service. Version is the specific configuration
	// version. Both fields are required.
	Service string
	Version int

	// Name is the name of the logentries to delete (required).
	Name string
}

// DeleteLogentries deletes the given logentries version.
func (c *Client) DeleteLogentries(i *DeleteLogentriesInput) error {
	if i.Service == "" {
		return ErrMissingService
	}

	if i.Version == 0 {
		return ErrMissingVersion
	}

	if i.Name == "" {
		return ErrMissingName
	}

	path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name)
	resp, err := c.Delete(path, nil)
	if err != nil {
		return err
	}

	// The delete endpoint returns a status body rather than the deleted
	// object; treat a non-ok status as an error.
	var r *statusResp
	if err := decodeJSON(&r, resp.Body); err != nil {
		return err
	}
	if !r.Ok() {
		return fmt.Errorf("Not Ok")
	}
	return nil
}
TheWeatherCompany/terraform
vendor/github.com/sethvargo/go-fastly/logentries.go
GO
mpl-2.0
6,173
# Copyright (c) 2010 Twisted Matrix Laboratories. # See LICENSE for details. from twisted.application.service import ServiceMaker TwistedProcmon = ServiceMaker( "Twisted Process Monitor", "twisted.runner.procmontap", ("A process watchdog / supervisor"), "procmon")
eunchong/build
third_party/twisted_10_2/twisted/plugins/twisted_runner.py
Python
bsd-3-clause
283
/*
 * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software;Designed and Developed mainly by many Chinese
 * opensource volunteers. you can redistribute it and/or modify it under the
 * terms of the GNU General Public License version 2 only, as published by the
 * Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Any questions about this component can be directed to it's project Web address
 * https://code.google.com/p/opencloudb/.
 *
 */
package io.mycat.performance;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Load-generation job that batch-inserts synthetic rows into the
 * {@code travelrecord} table. The row id is NOT inserted by this job; it is
 * expected to come from a global sequence (hence the class name), which is why
 * the id column is absent from the INSERT statement.
 *
 * Each job instance covers the id range [startId, startId + totalRecords - 1]
 * and is intended to run on its own thread; shared progress is reported
 * through the two AtomicLong counters.
 */
public class TravelRecordGlobalSeqInsertJob implements Runnable {
	// Last id (inclusive) this job will generate.
	private final long endId;
	// Next id to generate (renamed from the original's misspelled "finsihed").
	private long finished;
	private final int batchSize;
	// Cross-thread counters shared with the driver that spawned this job.
	private final AtomicLong finishedCount;
	private final AtomicLong failedCount;
	// Scratch calendar/formatter for getRandomDay(); the job runs on a single
	// thread, so the non-thread-safe SimpleDateFormat is acceptable here.
	Calendar date = Calendar.getInstance();
	DateFormat datafomat = new SimpleDateFormat("yyyy-MM-dd");
	private final SimpleConPool conPool;

	/**
	 * @param conPool       source of JDBC connections (returned on completion)
	 * @param totalRecords  number of rows this job should insert
	 * @param batchSize     rows per JDBC batch/commit
	 * @param startId       first id in this job's range
	 * @param finshiedCount shared success counter
	 * @param failedCount   shared failure counter
	 */
	public TravelRecordGlobalSeqInsertJob(SimpleConPool conPool,
			long totalRecords, int batchSize, long startId,
			AtomicLong finshiedCount, AtomicLong failedCount) {
		super();
		this.conPool = conPool;
		this.endId = startId + totalRecords - 1;
		this.batchSize = batchSize;
		this.finished = startId;
		this.finishedCount = finshiedCount;
		this.failedCount = failedCount;
	}

	/**
	 * Inserts one batch and commits it.
	 *
	 * BUG FIX: the original SQL listed four columns but five "?" placeholders
	 * ("values(?,?,?,?,?)"), so every executeBatch() failed with a missing
	 * parameter error. The statement is also now closed in a finally block so
	 * it is not leaked when executeBatch()/commit() throw.
	 *
	 * @return number of rows handed to the batch
	 */
	private int insert(Connection con, List<Map<String, String>> list)
			throws SQLException {
		// id is intentionally omitted: it is produced by a global sequence.
		String sql = "insert into travelrecord (user_id,traveldate,fee,days) values(?,?,?,?)";
		PreparedStatement ps = con.prepareStatement(sql);
		try {
			for (Map<String, String> map : list) {
				ps.setString(1, map.get("user_id"));
				ps.setString(2, map.get("traveldate"));
				ps.setString(3, map.get("fee"));
				ps.setString(4, map.get("days"));
				ps.addBatch();
			}
			ps.executeBatch();
			con.commit();
			ps.clearBatch();
		} finally {
			ps.close();
		}
		return list.size();
	}

	/**
	 * Builds the next batch of synthetic rows, advancing the cursor.
	 * Returns an empty list when the id range is exhausted. A trailing
	 * undersized batch is merged into the one before it.
	 */
	private List<Map<String, String>> getNextBatch() {
		if (finished >= endId) {
			return Collections.emptyList();
		}
		long end = (finished + batchSize) < this.endId
				? (finished + batchSize) : endId;
		// If what would remain after this batch is smaller than a full
		// batch, absorb it now rather than issuing a tiny final batch.
		if (end + batchSize > this.endId) {
			end = this.endId;
		}
		List<Map<String, String>> list =
				new ArrayList<Map<String, String>>((int) (end - finished + 1));
		for (long i = finished; i <= end; i++) {
			Map<String, String> m = new HashMap<String, String>();
			m.put("id", i + "");
			m.put("user_id", "user " + i);
			m.put("traveldate", getRandomDay(i));
			m.put("fee", i % 10000 + "");
			m.put("days", i % 10000 + "");
			list.add(m);
		}
		finished += list.size();
		return list;
	}

	/**
	 * Deterministic pseudo-random date derived from the row id.
	 * NOTE(review): Calendar.MONTH is zero-based, so i % 11 + 1 yields
	 * February..December only — presumably acceptable for synthetic test
	 * data; behavior kept as-is.
	 */
	private String getRandomDay(long i) {
		int month = Long.valueOf(i % 11 + 1).intValue();
		int day = Long.valueOf(i % 27 + 1).intValue();
		date.set(Calendar.MONTH, month);
		date.set(Calendar.DAY_OF_MONTH, day);
		return datafomat.format(date.getTime());
	}

	@Override
	public void run() {
		Connection con = null;
		try {
			List<Map<String, String>> batch = getNextBatch();
			while (!batch.isEmpty()) {
				try {
					// (Re)acquire a connection lazily; autocommit off so a
					// whole batch commits or rolls back together.
					if (con == null || con.isClosed()) {
						con = conPool.getConnection();
						con.setAutoCommit(false);
					}
					insert(con, batch);
					finishedCount.addAndGet(batch.size());
				} catch (Exception e) {
					e.printStackTrace();
					// BUG FIX: guard against con being null when
					// getConnection() itself failed (the original would NPE
					// here), and drop the duplicated printStackTrace call.
					if (con != null) {
						try {
							con.rollback();
						} catch (SQLException e1) {
							e1.printStackTrace();
						}
					}
					failedCount.addAndGet(batch.size());
				}
				batch = getNextBatch();
			}
		} finally {
			if (con != null) {
				this.conPool.returnCon(con);
			}
		}
	}
}
gavin57688/Mycat-Server-1.6
src/test/java/io/mycat/performance/TravelRecordGlobalSeqInsertJob.java
Java
gpl-2.0
4,605
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "tx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

/* Account a transmitted unicast data frame against the device-wide and
 * per-vif traffic statistics (used elsewhere for e.g. traffic detection).
 */
static void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct rtw_vif *rtwvif;

	hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	/* only unicast frames count */
	if (!is_broadcast_ether_addr(hdr->addr1) &&
	    !is_multicast_ether_addr(hdr->addr1)) {
		rtwdev->stats.tx_unicast += skb->len;
		rtwdev->stats.tx_cnt++;
		if (vif) {
			rtwvif = (struct rtw_vif *)vif->drv_priv;
			rtwvif->stats.tx_unicast += skb->len;
			rtwvif->stats.tx_cnt++;
		}
	}
}

/* Serialize pkt_info into the hardware TX descriptor that sits at the
 * head of the skb (skb->data must already point at descriptor space).
 */
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
{
	__le32 *txdesc = (__le32 *)skb->data;

	SET_TX_DESC_TXPKTSIZE(txdesc,  pkt_info->tx_pkt_size);
	SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
	SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
	SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
	SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
	SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
	SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
	SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
	SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
	SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
	SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
	SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
	SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
	SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
	SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
	SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
	SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
	SET_TX_DESC_LS(txdesc, pkt_info->ls);
	SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
	SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
	SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
	SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
	SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
	SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
	SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
	SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
	SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);

/* Convert the peer's HT A-MPDU length exponent into the descriptor's
 * max-aggregation-number field.
 */
static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
	u8 exp = sta->ht_cap.ampdu_factor;

	/* the least ampdu factor is 8K, and the value in the tx desc is the
	 * max aggregation num, which represents val * 2 packets can be
	 * aggregated in an AMPDU, so here we should use 8/2=4 as the base
	 */
	return (BIT(2) << exp) - 1;
}

/* Peer's required minimum MPDU start spacing, passed straight through. */
static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
	return sta->ht_cap.ampdu_density;
}

/* Pick the top HT MCS this device/peer pair supports (MCS15 for 2T2R when
 * the peer advertises a second spatial stream, else MCS7).
 */
static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
				 struct ieee80211_sta *sta)
{
	u8 rate;

	if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
		rate = DESC_RATEMCS15;
	else
		rate = DESC_RATEMCS7;

	return rate;
}

/* Pick the top VHT MCS from the peer's tx_mcs_map, limited by how many
 * spatial streams this chip's efuse says it has.
 */
static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
				  struct ieee80211_sta *sta)
{
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 rate;
	u16 tx_mcs_map;

	tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
	if (efuse->hw_cap.nss == 1) {
		switch (tx_mcs_map & 0x3) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT1SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT1SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT1SS_MCS9;
			break;
		}
	} else if (efuse->hw_cap.nss >= 2) {
		switch ((tx_mcs_map & 0xc) >> 2) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT2SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT2SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT2SS_MCS9;
			break;
		}
	} else {
		rate = DESC_RATEVHT1SS_MCS9;
	}

	return rate;
}

/* Assign this frame a TX-report sequence number and mark it so the
 * firmware will send a completion (CCX) report for it.
 */
static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;

	/* [11:8], reserved, fills with zero
	 * [7:2], tx report sequence number
	 * [1:0], firmware use, fills with zero
	 */
	pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
	pkt_info->report = true;
}

/* Timer callback: drop any queued skbs whose firmware TX report never
 * arrived, warning once (they would otherwise sit in the queue forever).
 */
void rtw_tx_report_purge_timer(struct timer_list *t)
{
	struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;

	if (skb_queue_len(&tx_report->queue) == 0)
		return;

	WARN(1, "purge skb(s) not reported by firmware\n");

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_purge(&tx_report->queue);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

/* Park an skb awaiting its firmware TX report, keyed by sequence number
 * stashed in the skb's driver data; (re)arm the purge timer.
 */
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;
	u8 *drv_data;

	/* pass sn to tx report handler through driver data */
	drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
	*drv_data = sn;

	spin_lock_irqsave(&tx_report->q_lock, flags);
	__skb_queue_tail(&tx_report->queue, skb);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);

	mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
}
EXPORT_SYMBOL(rtw_tx_report_enqueue);

/* Report TX completion (ACKed or not) for a single skb back to mac80211. */
static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
				    struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}

/* Handle a firmware CCX TX-report C2H: extract seq/status (layout differs
 * between the V0 and V1 report formats), find the matching queued skb by
 * sequence number, and complete it.
 */
void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	struct rtw_c2h_cmd *c2h;
	struct sk_buff *cur, *tmp;
	unsigned long flags;
	u8 sn, st;
	u8 *n;

	c2h = get_c2h_from_skb(skb);

	if (src == C2H_CCX_TX_RPT) {
		sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V0(c2h->payload);
	} else {
		sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V1(c2h->payload);
	}

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
		n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
		if (*n == sn) {
			__skb_unlink(cur, &tx_report->queue);
			/* st == 0 means the frame was acked */
			rtw_tx_report_tx_status(rtwdev, cur, st == 0);
			break;
		}
	}
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

/* Force a fixed basic rate (1M on 2.4 GHz, 6M on 5 GHz) with rate
 * fallback disabled — used for management/reserved-page frames.
 */
static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct sk_buff *skb)
{
	if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
		pkt_info->rate_id = RTW_RATEID_B_20M;
		pkt_info->rate = DESC_RATE1M;
	} else {
		pkt_info->rate_id = RTW_RATEID_G;
		pkt_info->rate = DESC_RATE6M;
	}
	pkt_info->use_rate = true;
	pkt_info->dis_rate_fallback = true;
}

/* Map the mac80211 hardware-key cipher to the descriptor sec_type code
 * (0x01 = WEP/TKIP, 0x03 = CCMP, 0 = none/unsupported).
 */
static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev,
				       struct rtw_tx_pkt_info *pkt_info,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 sec_type = 0;

	if (info && info->control.hw_key) {
		struct ieee80211_key_conf *key = info->control.hw_key;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			sec_type = 0x01;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			sec_type = 0x03;
			break;
		default:
			break;
		}
	}

	pkt_info->sec_type = sec_type;
}

/* Descriptor setup specific to management/nullfunc frames: fixed basic
 * rate plus hardware sequence numbering.
 */
static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
	pkt_info->dis_qselseq = true;
	pkt_info->en_hwseq = true;
	pkt_info->hw_ssn_sel = 0;
	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

/* Descriptor setup for data frames: sequence number from the header,
 * A-MPDU parameters, RTS, and the best rate/bandwidth for the peer.
 * Broadcast/multicast (sta == NULL) keeps conservative defaults.
 */
static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtw_sta_info *si;
	u16 seq;
	u8 ampdu_factor = 0;
	u8 ampdu_density = 0;
	bool ampdu_en = false;
	u8 rate = DESC_RATE6M;
	u8 rate_id = 6;
	u8 bw = RTW_CHANNEL_WIDTH_20;
	bool stbc = false;
	bool ldpc = false;

	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;

	/* for broadcast/multicast, use default values */
	if (!sta)
		goto out;

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		ampdu_en = true;
		ampdu_factor = get_tx_ampdu_factor(sta);
		ampdu_density = get_tx_ampdu_density(sta);
	}

	if (info->control.use_rts)
		pkt_info->rts = true;

	if (sta->vht_cap.vht_supported)
		rate = get_highest_vht_tx_rate(rtwdev, sta);
	else if (sta->ht_cap.ht_supported)
		rate = get_highest_ht_tx_rate(rtwdev, sta);
	else if (sta->supp_rates[0] <= 0xf)
		rate = DESC_RATE11M;
	else
		rate = DESC_RATE54M;

	si = (struct rtw_sta_info *)sta->drv_priv;

	bw = si->bw_mode;
	rate_id = si->rate_id;
	stbc = si->stbc_en;
	ldpc = si->ldpc_en;

out:
	pkt_info->seq = seq;
	pkt_info->ampdu_factor = ampdu_factor;
	pkt_info->ampdu_density = ampdu_density;
	pkt_info->ampdu_en = ampdu_en;
	pkt_info->rate = rate;
	pkt_info->rate_id = rate_id;
	pkt_info->bw = bw;
	pkt_info->stbc = stbc;
	pkt_info->ldpc = ldpc;
}

/* Top-level per-frame descriptor setup: dispatch to the mgmt/data helper,
 * then fill the fields common to every frame (bmc, security, size, qsel).
 */
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw_sta_info *si;
	struct ieee80211_vif *vif = NULL;
	__le16 fc = hdr->frame_control;
	bool bmc;

	if (sta) {
		si = (struct rtw_sta_info *)sta->drv_priv;
		vif = si->vif;
	}

	if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
		rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb);
	else if (ieee80211_is_data(fc))
		rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);

	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		rtw_tx_report_enable(rtwdev, pkt_info);

	pkt_info->bmc = bmc;
	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->qsel = skb->priority;
	pkt_info->ls = true;

	/* maybe merge with tx status ? */
	rtw_tx_stats(rtwdev, vif, skb);
}

/* Descriptor setup for reserved-page packets (beacon, ps-poll, null, ...)
 * downloaded to the firmware's reserved page area.
 */
void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
				      struct rtw_tx_pkt_info *pkt_info,
				      struct sk_buff *skb,
				      enum rtw_rsvd_packet_type type)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool bmc;

	/* A beacon or dummy reserved page packet indicates that it is the first
	 * reserved page, and the qsel of it will be set in each hci.
	 */
	if (type != RSVD_BEACON && type != RSVD_DUMMY)
		pkt_info->qsel = TX_DESC_QSEL_MGMT;

	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);
	pkt_info->bmc = bmc;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->ls = true;
	if (type == RSVD_PS_POLL) {
		pkt_info->nav_use_hdr = true;
	} else {
		pkt_info->dis_qselseq = true;
		pkt_info->en_hwseq = true;
		pkt_info->hw_ssn_sel = 0;
	}
	if (type == RSVD_QOS_NULL)
		pkt_info->bt_null = true;

	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);

	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

/* Allocate an skb carrying "buf" as a reserved page, leaving headroom for
 * the TX descriptor, and initialize its pkt_info as a beacon-type page.
 */
struct sk_buff *
rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
				struct rtw_tx_pkt_info *pkt_info,
				u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON);

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);

/* Allocate an skb for an H2C data payload with descriptor headroom; only
 * the packet size is recorded in pkt_info.
 */
struct sk_buff *
rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
			  struct rtw_tx_pkt_info *pkt_info,
			  u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	pkt_info->tx_pkt_size = size;

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);

/* Direct-transmit entry point (non-queued path): build pkt_info, hand the
 * skb to the HCI layer, and kick off DMA; frees the skb on failure.
 */
void rtw_tx(struct rtw_dev *rtwdev,
	    struct ieee80211_tx_control *control,
	    struct sk_buff *skb)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, control->sta, skb);
	ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
	if (ret) {
		rtw_err(rtwdev, "failed to write TX skb to HCI\n");
		goto out;
	}

	rtw_hci_tx_kick_off(rtwdev);

	return;

out:
	ieee80211_free_txskb(rtwdev->hw, skb);
}

/* If this txq has an A-MPDU session, tag the frame for aggregation;
 * otherwise consider asking the BA work to start one (skipping VO, EAPOL,
 * and queues where BA is blocked).
 */
static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
			      struct rtw_txq *rtwtxq,
			      struct sk_buff *skb)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct ieee80211_tx_info *info;
	struct rtw_sta_info *si;

	if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
		info = IEEE80211_SKB_CB(skb);
		info->flags |= IEEE80211_TX_CTL_AMPDU;
		return;
	}

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	if (!txq->sta)
		return;

	si = (struct rtw_sta_info *)txq->sta->drv_priv;
	set_bit(txq->tid, si->tid_ba);

	ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
}

/* Push one skb from a txq down to the HCI layer, recording the push time. */
static int rtw_txq_push_skb(struct rtw_dev *rtwdev,
			    struct rtw_txq *rtwtxq,
			    struct sk_buff *skb)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	rtw_txq_check_agg(rtwdev, rtwtxq, skb);

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, txq->sta, skb);
	ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
	if (ret) {
		rtw_err(rtwdev, "failed to write TX skb to HCI\n");
		return ret;
	}
	rtwtxq->last_push = jiffies;

	return 0;
}

/* Dequeue the next frame mac80211 has buffered for this txq (NULL if
 * empty).
 */
static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev,
				       struct rtw_txq *rtwtxq)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

/* Drain up to "frames" skbs from the txq into the HCI layer, stopping at
 * the first failure. (Note: "pusk" typo below is the original message.)
 */
static void rtw_txq_push(struct rtw_dev *rtwdev,
			 struct rtw_txq *rtwtxq,
			 unsigned long frames)
{
	struct sk_buff *skb;
	int ret;
	int i;

	rcu_read_lock();

	for (i = 0; i < frames; i++) {
		skb = rtw_txq_dequeue(rtwdev, rtwtxq);
		if (!skb)
			break;

		ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb);
		if (ret) {
			rtw_err(rtwdev, "failed to pusk skb, ret %d\n", ret);
			break;
		}
	}

	rcu_read_unlock();
}

/* Tasklet: service every pending txq (pushing its current depth), then
 * kick the HCI layer once for the whole batch.
 */
void rtw_tx_tasklet(struct tasklet_struct *t)
{
	struct rtw_dev *rtwdev = from_tasklet(rtwdev, t, tx_tasklet);
	struct rtw_txq *rtwtxq, *tmp;

	spin_lock_bh(&rtwdev->txq_lock);

	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
		struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
		unsigned long frame_cnt;
		unsigned long byte_cnt;

		ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
		rtw_txq_push(rtwdev, rtwtxq, frame_cnt);

		list_del_init(&rtwtxq->list);
	}

	rtw_hci_tx_kick_off(rtwdev);

	spin_unlock_bh(&rtwdev->txq_lock);
}

/* Initialize the driver-private part of a mac80211 txq. */
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	INIT_LIST_HEAD(&rtwtxq->list);
}

/* Remove a txq from the pending list (if queued) when it is torn down. */
void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	spin_lock_bh(&rtwdev->txq_lock);
	if (!list_empty(&rtwtxq->list))
		list_del_init(&rtwtxq->list);
	spin_unlock_bh(&rtwdev->txq_lock);
}
GuillaumeSeren/linux
drivers/net/wireless/realtek/rtw88/tx.c
C
gpl-2.0
16,249
/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm_crtc_helper.h"

#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nv50_display.h"

/* Detach the DAC from its CRTC: blank the CRTC, then submit an EVO
 * MODE_CTRL=0 + UPDATE pair on the display channel.
 */
static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting DAC\n");
		return;
	}
	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING  (evo, 0);
	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING  (evo, 0);

	nv_encoder->crtc = NULL;
}

/* Analog load detection: drive a test pattern onto the DAC and read back
 * whether a monitor load was sensed; DPMS state is saved and restored
 * around the probe.
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (dev_priv->vbios.dactestval) {
		load_pattern = dev_priv->vbios.dactestval;
		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
			  load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
			 load_pattern);
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);

	return status;
}

/* Program DAC power state: blank plus hsync/vsync/full-off bits derived
 * from the DRM DPMS mode; waits for the previous update to settle first.
 */
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return;
	}

	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;

	if (mode != DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	case DRM_MODE_DPMS_OFF:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	default:
		break;
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
}

/* save/restore are unimplemented stubs — log loudly if ever reached. */
static void
nv50_dac_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}

static void
nv50_dac_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}

/* Apply connector scaling: when scaling is enabled and the connector has
 * a native mode, substitute it for the requested mode.
 */
static bool
nv50_dac_mode_fixup(struct drm_encoder *encoder,
		    struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_connector *connector;

	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);

	connector = nouveau_encoder_connector_get(nv_encoder);
	if (!connector) {
		NV_ERROR(encoder->dev, "Encoder has no connector\n");
		return false;
	}

	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
	     connector->native_mode)
		drm_mode_copy(adjusted_mode, connector->native_mode);

	return true;
}

/* Nothing to do at commit time; mode_set already programmed the EVO ring. */
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}

/* Attach the DAC to its CRTC via an EVO MODE_CTRL submission, encoding
 * CRTC selection, output type, and sync polarity.
 */
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not a 100% sure. */
	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == OUTPUT_TV)
		mode_ctl |= 0x100;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(dev, "no space while connecting DAC\n");
		return;
	}
	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);

	nv_encoder->crtc = encoder->crtc;
}

/* Return the CRTC this encoder is currently driving (NULL if detached). */
static struct drm_crtc *
nv50_dac_crtc_get(struct drm_encoder *encoder)
{
	return nouveau_encoder(encoder)->crtc;
}

static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
	.dpms = nv50_dac_dpms,
	.save = nv50_dac_save,
	.restore = nv50_dac_restore,
	.mode_fixup = nv50_dac_mode_fixup,
	.prepare = nv50_dac_disconnect,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.get_crtc = nv50_dac_crtc_get,
	.detect = nv50_dac_detect,
	.disable = nv50_dac_disconnect
};

/* Tear down the encoder and free the wrapping nouveau_encoder. */
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	if (!encoder)
		return;

	NV_DEBUG_KMS(encoder->dev, "\n");

	drm_encoder_cleanup(encoder);
	kfree(nv_encoder);
}

static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
	.destroy = nv50_dac_destroy,
};

/* Create and register a DAC encoder for the given DCB entry, attaching it
 * to the connector. "or" is derived from the entry's output-resource bit.
 */
int
nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	nv_encoder->or = ffs(entry->or) - 1;

	drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
kv193/buildroot
linux/linux-kernel/drivers/gpu/drm/nouveau/nv50_dac.c
C
gpl-2.0
9,006
/* Copyright © 2001-2004 World Wide Web Consortium, (Massachusetts Institute of Technology, European Research Consortium for Informatics and Mathematics, Keio University). All Rights Reserved. This work is distributed under the W3C® Software License [1] in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231 */ /** * Gets URI that identifies the test. * @return uri identifier of test */ function getTargetURI() { return "http://www.w3.org/2001/DOM-Test-Suite/level2/html/HTMLTableRowElement08"; } var docsLoaded = -1000000; var builder = null; // // This function is called by the testing framework before // running the test suite. // // If there are no configuration exceptions, asynchronous // document loading is started. Otherwise, the status // is set to complete and the exception is immediately // raised when entering the body of the test. // function setUpPage() { setUpPageStatus = 'running'; try { // // creates test document builder, may throw exception // builder = createConfiguredBuilder(); docsLoaded = 0; var docRef = null; if (typeof(this.doc) != 'undefined') { docRef = this.doc; } docsLoaded += preload(docRef, "doc", "tablerow"); if (docsLoaded == 1) { setUpPageStatus = 'complete'; } } catch(ex) { catchInitializationError(builder, ex); setUpPageStatus = 'complete'; } } // // This method is called on the completion of // each asychronous load started in setUpTests. // // When every synchronous loaded document has completed, // the page status is changed which allows the // body of the test to be executed. function loadComplete() { if (++docsLoaded == 1) { setUpPageStatus = 'complete'; } } /** * The ch attribute specifies the alignment character for cells in a column. Retrieve the char attribute of the second TR element and examine its value. 
* @author NIST * @author Mary Brady * @see http://www.w3.org/TR/1998/REC-DOM-Level-1-19981001/level-one-html#ID-16230502 */ function HTMLTableRowElement08() { var success; if(checkInitialization(builder, "HTMLTableRowElement08") != null) return; var nodeList; var testNode; var vch; var doc; var docRef = null; if (typeof(this.doc) != 'undefined') { docRef = this.doc; } doc = load(docRef, "doc", "tablerow"); nodeList = doc.getElementsByTagName("tr"); assertSize("Asize",5,nodeList); testNode = nodeList.item(1); vch = testNode.ch; assertEquals("chLink","*",vch); } function runTest() { HTMLTableRowElement08(); }
danialbehzadi/Nokia-RM-1013-2.0.0.11
webkit/LayoutTests/dom/xhtml/level2/html/HTMLTableRowElement08.js
JavaScript
gpl-3.0
2,931
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.List; import java.util.Map; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; /** * XAttrStorage is used to read and set xattrs for an inode. */ @InterfaceAudience.Private public class XAttrStorage { private static final Map<String, String> internedNames = Maps.newHashMap(); /** * Reads the existing extended attributes of an inode. If the * inode does not have an <code>XAttr</code>, then this method * returns an empty list. * <p/> * Must be called while holding the FSDirectory read lock. * * @param inode INode to read * @param snapshotId * @return List<XAttr> <code>XAttr</code> list. */ public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) { XAttrFeature f = inode.getXAttrFeature(snapshotId); return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs(); } /** * Reads the existing extended attributes of an inode. * <p/> * Must be called while holding the FSDirectory read lock. 
* * @param inodeAttr INodeAttributes to read. * @return List<XAttr> <code>XAttr</code> list. */ public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) { XAttrFeature f = inodeAttr.getXAttrFeature(); return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs(); } /** * Update xattrs of inode. * <p/> * Must be called while holding the FSDirectory write lock. * * @param inode INode to update * @param xAttrs to update xAttrs. * @param snapshotId id of the latest snapshot of the inode */ public static void updateINodeXAttrs(INode inode, List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException { if (xAttrs == null || xAttrs.isEmpty()) { if (inode.getXAttrFeature() != null) { inode.removeXAttrFeature(snapshotId); } return; } // Dedupe the xAttr name and save them into a new interned list List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size()); for (XAttr xAttr : xAttrs) { final String name = xAttr.getName(); String internedName = internedNames.get(name); if (internedName == null) { internedName = name; internedNames.put(internedName, internedName); } XAttr internedXAttr = new XAttr.Builder() .setName(internedName) .setNameSpace(xAttr.getNameSpace()) .setValue(xAttr.getValue()) .build(); internedXAttrs.add(internedXAttr); } // Save the list of interned xattrs ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs); if (inode.getXAttrFeature() != null) { inode.removeXAttrFeature(snapshotId); } inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId); } }
messi49/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
Java
apache-2.0
3,849
"no use strict"; ;(function(window) { if (typeof window.window != "undefined" && window.document) { return; } window.console = function() { var msgs = Array.prototype.slice.call(arguments, 0); postMessage({type: "log", data: msgs}); }; window.console.error = window.console.warn = window.console.log = window.console.trace = window.console; window.window = window; window.ace = window; window.onerror = function(message, file, line, col, err) { postMessage({type: "error", data: { message: message, file: file, line: line, col: col, stack: err.stack }}); }; window.normalizeModule = function(parentId, moduleName) { // normalize plugin requires if (moduleName.indexOf("!") !== -1) { var chunks = moduleName.split("!"); return window.normalizeModule(parentId, chunks[0]) + "!" + window.normalizeModule(parentId, chunks[1]); } // normalize relative requires if (moduleName.charAt(0) == ".") { var base = parentId.split("/").slice(0, -1).join("/"); moduleName = (base ? base + "/" : "") + moduleName; while(moduleName.indexOf(".") !== -1 && previous != moduleName) { var previous = moduleName; moduleName = moduleName.replace(/^\.\//, "").replace(/\/\.\//, "/").replace(/[^\/]+\/\.\.\//, ""); } } return moduleName; }; window.require = function(parentId, id) { if (!id) { id = parentId; parentId = null; } if (!id.charAt) throw new Error("worker.js require() accepts only (parentId, id) as arguments"); id = window.normalizeModule(parentId, id); var module = window.require.modules[id]; if (module) { if (!module.initialized) { module.initialized = true; module.exports = module.factory().exports; } return module.exports; } var chunks = id.split("/"); if (!window.require.tlns) return console.log("unable to load " + id); chunks[0] = window.require.tlns[chunks[0]] || chunks[0]; var path = chunks.join("/") + ".js"; window.require.id = id; importScripts(path); return window.require(parentId, id); }; window.require.modules = {}; window.require.tlns = {}; window.define = function(id, deps, factory) { 
if (arguments.length == 2) { factory = deps; if (typeof id != "string") { deps = id; id = window.require.id; } } else if (arguments.length == 1) { factory = id; deps = []; id = window.require.id; } if (typeof factory != "function") { window.require.modules[id] = { exports: factory, initialized: true }; return; } if (!deps.length) // If there is no dependencies, we inject 'require', 'exports' and // 'module' as dependencies, to provide CommonJS compatibility. deps = ['require', 'exports', 'module']; var req = function(childId) { return window.require(id, childId); }; window.require.modules[id] = { exports: {}, factory: function() { var module = this; var returnExports = factory.apply(this, deps.map(function(dep) { switch(dep) { // Because 'require', 'exports' and 'module' aren't actual // dependencies, we must handle them seperately. case 'require': return req; case 'exports': return module.exports; case 'module': return module; // But for all other dependencies, we can just go ahead and // require them. 
default: return req(dep); } })); if (returnExports) module.exports = returnExports; return module; } }; }; window.define.amd = {}; window.initBaseUrls = function initBaseUrls(topLevelNamespaces) { require.tlns = topLevelNamespaces; }; window.initSender = function initSender() { var EventEmitter = window.require("ace/lib/event_emitter").EventEmitter; var oop = window.require("ace/lib/oop"); var Sender = function() {}; (function() { oop.implement(this, EventEmitter); this.callback = function(data, callbackId) { postMessage({ type: "call", id: callbackId, data: data }); }; this.emit = function(name, data) { postMessage({ type: "event", name: name, data: data }); }; }).call(Sender.prototype); return new Sender(); }; var main = window.main = null; var sender = window.sender = null; window.onmessage = function(e) { var msg = e.data; if (msg.command) { if (main[msg.command]) main[msg.command].apply(main, msg.args); else throw new Error("Unknown command:" + msg.command); } else if (msg.init) { initBaseUrls(msg.tlns); require("ace/lib/es5-shim"); sender = window.sender = initSender(); var clazz = require(msg.module)[msg.classname]; main = window.main = new clazz(sender); } else if (msg.event && sender) { sender._signal(msg.event, msg.data); } }; })(this); define("ace/lib/oop",["require","exports","module"], function(require, exports, module) { "use strict"; exports.inherits = function(ctor, superCtor) { ctor.super_ = superCtor; ctor.prototype = Object.create(superCtor.prototype, { constructor: { value: ctor, enumerable: false, writable: true, configurable: true } }); }; exports.mixin = function(obj, mixin) { for (var key in mixin) { obj[key] = mixin[key]; } return obj; }; exports.implement = function(proto, mixin) { exports.mixin(proto, mixin); }; }); define("ace/lib/lang",["require","exports","module"], function(require, exports, module) { "use strict"; exports.last = function(a) { return a[a.length - 1]; }; exports.stringReverse = function(string) { return 
string.split("").reverse().join(""); }; exports.stringRepeat = function (string, count) { var result = ''; while (count > 0) { if (count & 1) result += string; if (count >>= 1) string += string; } return result; }; var trimBeginRegexp = /^\s\s*/; var trimEndRegexp = /\s\s*$/; exports.stringTrimLeft = function (string) { return string.replace(trimBeginRegexp, ''); }; exports.stringTrimRight = function (string) { return string.replace(trimEndRegexp, ''); }; exports.copyObject = function(obj) { var copy = {}; for (var key in obj) { copy[key] = obj[key]; } return copy; }; exports.copyArray = function(array){ var copy = []; for (var i=0, l=array.length; i<l; i++) { if (array[i] && typeof array[i] == "object") copy[i] = this.copyObject( array[i] ); else copy[i] = array[i]; } return copy; }; exports.deepCopy = function (obj) { if (typeof obj !== "object" || !obj) return obj; var cons = obj.constructor; if (cons === RegExp) return obj; var copy = cons(); for (var key in obj) { if (typeof obj[key] === "object") { copy[key] = exports.deepCopy(obj[key]); } else { copy[key] = obj[key]; } } return copy; }; exports.arrayToMap = function(arr) { var map = {}; for (var i=0; i<arr.length; i++) { map[arr[i]] = 1; } return map; }; exports.createMap = function(props) { var map = Object.create(null); for (var i in props) { map[i] = props[i]; } return map; }; exports.arrayRemove = function(array, value) { for (var i = 0; i <= array.length; i++) { if (value === array[i]) { array.splice(i, 1); } } }; exports.escapeRegExp = function(str) { return str.replace(/([.*+?^${}()|[\]\/\\])/g, '\\$1'); }; exports.escapeHTML = function(str) { return str.replace(/&/g, "&#38;").replace(/"/g, "&#34;").replace(/'/g, "&#39;").replace(/</g, "&#60;"); }; exports.getMatchOffsets = function(string, regExp) { var matches = []; string.replace(regExp, function(str) { matches.push({ offset: arguments[arguments.length-2], length: str.length }); }); return matches; }; exports.deferredCall = function(fcn) { var 
timer = null; var callback = function() { timer = null; fcn(); }; var deferred = function(timeout) { deferred.cancel(); timer = setTimeout(callback, timeout || 0); return deferred; }; deferred.schedule = deferred; deferred.call = function() { this.cancel(); fcn(); return deferred; }; deferred.cancel = function() { clearTimeout(timer); timer = null; return deferred; }; deferred.isPending = function() { return timer; }; return deferred; }; exports.delayedCall = function(fcn, defaultTimeout) { var timer = null; var callback = function() { timer = null; fcn(); }; var _self = function(timeout) { if (timer == null) timer = setTimeout(callback, timeout || defaultTimeout); }; _self.delay = function(timeout) { timer && clearTimeout(timer); timer = setTimeout(callback, timeout || defaultTimeout); }; _self.schedule = _self; _self.call = function() { this.cancel(); fcn(); }; _self.cancel = function() { timer && clearTimeout(timer); timer = null; }; _self.isPending = function() { return timer; }; return _self; }; }); define("ace/lib/event_emitter",["require","exports","module"], function(require, exports, module) { "use strict"; var EventEmitter = {}; var stopPropagation = function() { this.propagationStopped = true; }; var preventDefault = function() { this.defaultPrevented = true; }; EventEmitter._emit = EventEmitter._dispatchEvent = function(eventName, e) { this._eventRegistry || (this._eventRegistry = {}); this._defaultHandlers || (this._defaultHandlers = {}); var listeners = this._eventRegistry[eventName] || []; var defaultHandler = this._defaultHandlers[eventName]; if (!listeners.length && !defaultHandler) return; if (typeof e != "object" || !e) e = {}; if (!e.type) e.type = eventName; if (!e.stopPropagation) e.stopPropagation = stopPropagation; if (!e.preventDefault) e.preventDefault = preventDefault; listeners = listeners.slice(); for (var i=0; i<listeners.length; i++) { listeners[i](e, this); if (e.propagationStopped) break; } if (defaultHandler && !e.defaultPrevented) 
return defaultHandler(e, this); }; EventEmitter._signal = function(eventName, e) { var listeners = (this._eventRegistry || {})[eventName]; if (!listeners) return; listeners = listeners.slice(); for (var i=0; i<listeners.length; i++) listeners[i](e, this); }; EventEmitter.once = function(eventName, callback) { var _self = this; callback && this.addEventListener(eventName, function newCallback() { _self.removeEventListener(eventName, newCallback); callback.apply(null, arguments); }); }; EventEmitter.setDefaultHandler = function(eventName, callback) { var handlers = this._defaultHandlers if (!handlers) handlers = this._defaultHandlers = {_disabled_: {}}; if (handlers[eventName]) { var old = handlers[eventName]; var disabled = handlers._disabled_[eventName]; if (!disabled) handlers._disabled_[eventName] = disabled = []; disabled.push(old); var i = disabled.indexOf(callback); if (i != -1) disabled.splice(i, 1); } handlers[eventName] = callback; }; EventEmitter.removeDefaultHandler = function(eventName, callback) { var handlers = this._defaultHandlers if (!handlers) return; var disabled = handlers._disabled_[eventName]; if (handlers[eventName] == callback) { var old = handlers[eventName]; if (disabled) this.setDefaultHandler(eventName, disabled.pop()); } else if (disabled) { var i = disabled.indexOf(callback); if (i != -1) disabled.splice(i, 1); } }; EventEmitter.on = EventEmitter.addEventListener = function(eventName, callback, capturing) { this._eventRegistry = this._eventRegistry || {}; var listeners = this._eventRegistry[eventName]; if (!listeners) listeners = this._eventRegistry[eventName] = []; if (listeners.indexOf(callback) == -1) listeners[capturing ? 
"unshift" : "push"](callback); return callback; }; EventEmitter.off = EventEmitter.removeListener = EventEmitter.removeEventListener = function(eventName, callback) { this._eventRegistry = this._eventRegistry || {}; var listeners = this._eventRegistry[eventName]; if (!listeners) return; var index = listeners.indexOf(callback); if (index !== -1) listeners.splice(index, 1); }; EventEmitter.removeAllListeners = function(eventName) { if (this._eventRegistry) this._eventRegistry[eventName] = []; }; exports.EventEmitter = EventEmitter; }); define("ace/range",["require","exports","module"], function(require, exports, module) { "use strict"; var comparePoints = function(p1, p2) { return p1.row - p2.row || p1.column - p2.column; }; var Range = function(startRow, startColumn, endRow, endColumn) { this.start = { row: startRow, column: startColumn }; this.end = { row: endRow, column: endColumn }; }; (function() { this.isEqual = function(range) { return this.start.row === range.start.row && this.end.row === range.end.row && this.start.column === range.start.column && this.end.column === range.end.column; }; this.toString = function() { return ("Range: [" + this.start.row + "/" + this.start.column + "] -> [" + this.end.row + "/" + this.end.column + "]"); }; this.contains = function(row, column) { return this.compare(row, column) == 0; }; this.compareRange = function(range) { var cmp, end = range.end, start = range.start; cmp = this.compare(end.row, end.column); if (cmp == 1) { cmp = this.compare(start.row, start.column); if (cmp == 1) { return 2; } else if (cmp == 0) { return 1; } else { return 0; } } else if (cmp == -1) { return -2; } else { cmp = this.compare(start.row, start.column); if (cmp == -1) { return -1; } else if (cmp == 1) { return 42; } else { return 0; } } }; this.comparePoint = function(p) { return this.compare(p.row, p.column); }; this.containsRange = function(range) { return this.comparePoint(range.start) == 0 && this.comparePoint(range.end) == 0; }; 
this.intersects = function(range) { var cmp = this.compareRange(range); return (cmp == -1 || cmp == 0 || cmp == 1); }; this.isEnd = function(row, column) { return this.end.row == row && this.end.column == column; }; this.isStart = function(row, column) { return this.start.row == row && this.start.column == column; }; this.setStart = function(row, column) { if (typeof row == "object") { this.start.column = row.column; this.start.row = row.row; } else { this.start.row = row; this.start.column = column; } }; this.setEnd = function(row, column) { if (typeof row == "object") { this.end.column = row.column; this.end.row = row.row; } else { this.end.row = row; this.end.column = column; } }; this.inside = function(row, column) { if (this.compare(row, column) == 0) { if (this.isEnd(row, column) || this.isStart(row, column)) { return false; } else { return true; } } return false; }; this.insideStart = function(row, column) { if (this.compare(row, column) == 0) { if (this.isEnd(row, column)) { return false; } else { return true; } } return false; }; this.insideEnd = function(row, column) { if (this.compare(row, column) == 0) { if (this.isStart(row, column)) { return false; } else { return true; } } return false; }; this.compare = function(row, column) { if (!this.isMultiLine()) { if (row === this.start.row) { return column < this.start.column ? -1 : (column > this.end.column ? 1 : 0); }; } if (row < this.start.row) return -1; if (row > this.end.row) return 1; if (this.start.row === row) return column >= this.start.column ? 0 : -1; if (this.end.row === row) return column <= this.end.column ? 
0 : 1; return 0; }; this.compareStart = function(row, column) { if (this.start.row == row && this.start.column == column) { return -1; } else { return this.compare(row, column); } }; this.compareEnd = function(row, column) { if (this.end.row == row && this.end.column == column) { return 1; } else { return this.compare(row, column); } }; this.compareInside = function(row, column) { if (this.end.row == row && this.end.column == column) { return 1; } else if (this.start.row == row && this.start.column == column) { return -1; } else { return this.compare(row, column); } }; this.clipRows = function(firstRow, lastRow) { if (this.end.row > lastRow) var end = {row: lastRow + 1, column: 0}; else if (this.end.row < firstRow) var end = {row: firstRow, column: 0}; if (this.start.row > lastRow) var start = {row: lastRow + 1, column: 0}; else if (this.start.row < firstRow) var start = {row: firstRow, column: 0}; return Range.fromPoints(start || this.start, end || this.end); }; this.extend = function(row, column) { var cmp = this.compare(row, column); if (cmp == 0) return this; else if (cmp == -1) var start = {row: row, column: column}; else var end = {row: row, column: column}; return Range.fromPoints(start || this.start, end || this.end); }; this.isEmpty = function() { return (this.start.row === this.end.row && this.start.column === this.end.column); }; this.isMultiLine = function() { return (this.start.row !== this.end.row); }; this.clone = function() { return Range.fromPoints(this.start, this.end); }; this.collapseRows = function() { if (this.end.column == 0) return new Range(this.start.row, 0, Math.max(this.start.row, this.end.row-1), 0) else return new Range(this.start.row, 0, this.end.row, 0) }; this.toScreenRange = function(session) { var screenPosStart = session.documentToScreenPosition(this.start); var screenPosEnd = session.documentToScreenPosition(this.end); return new Range( screenPosStart.row, screenPosStart.column, screenPosEnd.row, screenPosEnd.column ); }; 
this.moveBy = function(row, column) { this.start.row += row; this.start.column += column; this.end.row += row; this.end.column += column; }; }).call(Range.prototype); Range.fromPoints = function(start, end) { return new Range(start.row, start.column, end.row, end.column); }; Range.comparePoints = comparePoints; Range.comparePoints = function(p1, p2) { return p1.row - p2.row || p1.column - p2.column; }; exports.Range = Range; }); define("ace/anchor",["require","exports","module","ace/lib/oop","ace/lib/event_emitter"], function(require, exports, module) { "use strict"; var oop = require("./lib/oop"); var EventEmitter = require("./lib/event_emitter").EventEmitter; var Anchor = exports.Anchor = function(doc, row, column) { this.$onChange = this.onChange.bind(this); this.attach(doc); if (typeof column == "undefined") this.setPosition(row.row, row.column); else this.setPosition(row, column); }; (function() { oop.implement(this, EventEmitter); this.getPosition = function() { return this.$clipPositionToDocument(this.row, this.column); }; this.getDocument = function() { return this.document; }; this.$insertRight = false; this.onChange = function(e) { var delta = e.data; var range = delta.range; if (range.start.row == range.end.row && range.start.row != this.row) return; if (range.start.row > this.row) return; if (range.start.row == this.row && range.start.column > this.column) return; var row = this.row; var column = this.column; var start = range.start; var end = range.end; if (delta.action === "insertText") { if (start.row === row && start.column <= column) { if (start.column === column && this.$insertRight) { } else if (start.row === end.row) { column += end.column - start.column; } else { column -= start.column; row += end.row - start.row; } } else if (start.row !== end.row && start.row < row) { row += end.row - start.row; } } else if (delta.action === "insertLines") { if (start.row === row && column === 0 && this.$insertRight) { } else if (start.row <= row) { row += 
end.row - start.row; } } else if (delta.action === "removeText") { if (start.row === row && start.column < column) { if (end.column >= column) column = start.column; else column = Math.max(0, column - (end.column - start.column)); } else if (start.row !== end.row && start.row < row) { if (end.row === row) column = Math.max(0, column - end.column) + start.column; row -= (end.row - start.row); } else if (end.row === row) { row -= end.row - start.row; column = Math.max(0, column - end.column) + start.column; } } else if (delta.action == "removeLines") { if (start.row <= row) { if (end.row <= row) row -= end.row - start.row; else { row = start.row; column = 0; } } } this.setPosition(row, column, true); }; this.setPosition = function(row, column, noClip) { var pos; if (noClip) { pos = { row: row, column: column }; } else { pos = this.$clipPositionToDocument(row, column); } if (this.row == pos.row && this.column == pos.column) return; var old = { row: this.row, column: this.column }; this.row = pos.row; this.column = pos.column; this._signal("change", { old: old, value: pos }); }; this.detach = function() { this.document.removeEventListener("change", this.$onChange); }; this.attach = function(doc) { this.document = doc || this.document; this.document.on("change", this.$onChange); }; this.$clipPositionToDocument = function(row, column) { var pos = {}; if (row >= this.document.getLength()) { pos.row = Math.max(0, this.document.getLength() - 1); pos.column = this.document.getLine(pos.row).length; } else if (row < 0) { pos.row = 0; pos.column = 0; } else { pos.row = row; pos.column = Math.min(this.document.getLine(pos.row).length, Math.max(0, column)); } if (column < 0) pos.column = 0; return pos; }; }).call(Anchor.prototype); }); define("ace/document",["require","exports","module","ace/lib/oop","ace/lib/event_emitter","ace/range","ace/anchor"], function(require, exports, module) { "use strict"; var oop = require("./lib/oop"); var EventEmitter = 
require("./lib/event_emitter").EventEmitter; var Range = require("./range").Range; var Anchor = require("./anchor").Anchor; var Document = function(text) { this.$lines = []; if (text.length === 0) { this.$lines = [""]; } else if (Array.isArray(text)) { this._insertLines(0, text); } else { this.insert({row: 0, column:0}, text); } }; (function() { oop.implement(this, EventEmitter); this.setValue = function(text) { var len = this.getLength(); this.remove(new Range(0, 0, len, this.getLine(len-1).length)); this.insert({row: 0, column:0}, text); }; this.getValue = function() { return this.getAllLines().join(this.getNewLineCharacter()); }; this.createAnchor = function(row, column) { return new Anchor(this, row, column); }; if ("aaa".split(/a/).length === 0) this.$split = function(text) { return text.replace(/\r\n|\r/g, "\n").split("\n"); }; else this.$split = function(text) { return text.split(/\r\n|\r|\n/); }; this.$detectNewLine = function(text) { var match = text.match(/^.*?(\r\n|\r|\n)/m); this.$autoNewLine = match ? 
match[1] : "\n"; this._signal("changeNewLineMode"); }; this.getNewLineCharacter = function() { switch (this.$newLineMode) { case "windows": return "\r\n"; case "unix": return "\n"; default: return this.$autoNewLine || "\n"; } }; this.$autoNewLine = ""; this.$newLineMode = "auto"; this.setNewLineMode = function(newLineMode) { if (this.$newLineMode === newLineMode) return; this.$newLineMode = newLineMode; this._signal("changeNewLineMode"); }; this.getNewLineMode = function() { return this.$newLineMode; }; this.isNewLine = function(text) { return (text == "\r\n" || text == "\r" || text == "\n"); }; this.getLine = function(row) { return this.$lines[row] || ""; }; this.getLines = function(firstRow, lastRow) { return this.$lines.slice(firstRow, lastRow + 1); }; this.getAllLines = function() { return this.getLines(0, this.getLength()); }; this.getLength = function() { return this.$lines.length; }; this.getTextRange = function(range) { if (range.start.row == range.end.row) { return this.getLine(range.start.row) .substring(range.start.column, range.end.column); } var lines = this.getLines(range.start.row, range.end.row); lines[0] = (lines[0] || "").substring(range.start.column); var l = lines.length - 1; if (range.end.row - range.start.row == l) lines[l] = lines[l].substring(0, range.end.column); return lines.join(this.getNewLineCharacter()); }; this.$clipPosition = function(position) { var length = this.getLength(); if (position.row >= length) { position.row = Math.max(0, length - 1); position.column = this.getLine(length-1).length; } else if (position.row < 0) position.row = 0; return position; }; this.insert = function(position, text) { if (!text || text.length === 0) return position; position = this.$clipPosition(position); if (this.getLength() <= 1) this.$detectNewLine(text); var lines = this.$split(text); var firstLine = lines.splice(0, 1)[0]; var lastLine = lines.length == 0 ? 
null : lines.splice(lines.length - 1, 1)[0]; position = this.insertInLine(position, firstLine); if (lastLine !== null) { position = this.insertNewLine(position); // terminate first line position = this._insertLines(position.row, lines); position = this.insertInLine(position, lastLine || ""); } return position; }; this.insertLines = function(row, lines) { if (row >= this.getLength()) return this.insert({row: row, column: 0}, "\n" + lines.join("\n")); return this._insertLines(Math.max(row, 0), lines); }; this._insertLines = function(row, lines) { if (lines.length == 0) return {row: row, column: 0}; while (lines.length > 20000) { var end = this._insertLines(row, lines.slice(0, 20000)); lines = lines.slice(20000); row = end.row; } var args = [row, 0]; args.push.apply(args, lines); this.$lines.splice.apply(this.$lines, args); var range = new Range(row, 0, row + lines.length, 0); var delta = { action: "insertLines", range: range, lines: lines }; this._signal("change", { data: delta }); return range.end; }; this.insertNewLine = function(position) { position = this.$clipPosition(position); var line = this.$lines[position.row] || ""; this.$lines[position.row] = line.substring(0, position.column); this.$lines.splice(position.row + 1, 0, line.substring(position.column, line.length)); var end = { row : position.row + 1, column : 0 }; var delta = { action: "insertText", range: Range.fromPoints(position, end), text: this.getNewLineCharacter() }; this._signal("change", { data: delta }); return end; }; this.insertInLine = function(position, text) { if (text.length == 0) return position; var line = this.$lines[position.row] || ""; this.$lines[position.row] = line.substring(0, position.column) + text + line.substring(position.column); var end = { row : position.row, column : position.column + text.length }; var delta = { action: "insertText", range: Range.fromPoints(position, end), text: text }; this._signal("change", { data: delta }); return end; }; this.remove = function(range) { 
// --- body of Document.remove (the function header is on the previous
// chunk line) ---
// Normalizes and clips the range, then deletes the covered text.
// Multi-line ranges are processed as: partial last row, full middle
// rows, then partial first row plus the joining newline.
if (!(range instanceof Range))
    range = Range.fromPoints(range.start, range.end);
range.start = this.$clipPosition(range.start);
range.end = this.$clipPosition(range.end);
if (range.isEmpty())
    return range.start;
var firstRow = range.start.row;
var lastRow = range.end.row;
if (range.isMultiLine()) {
    // The first row counts as "full" only when the range starts at
    // column 0.
    var firstFullRow = range.start.column == 0 ? firstRow : firstRow + 1;
    var lastFullRow = lastRow - 1;
    if (range.end.column > 0)
        this.removeInLine(lastRow, 0, range.end.column);
    if (lastFullRow >= firstFullRow)
        this._removeLines(firstFullRow, lastFullRow);
    if (firstFullRow != firstRow) {
        // Trim the tail of the first row, then merge it with what
        // remains of the (former) last row.
        this.removeInLine(firstRow, range.start.column, this.getLine(firstRow).length);
        this.removeNewLine(range.start.row);
    }
} else {
    this.removeInLine(firstRow, range.start.column, range.end.column);
}
return range.start;
};
// Removes the text between startColumn and endColumn on a single row
// and emits a "removeText" delta.  Returns the start of the removed
// range (undefined when the span is empty).
this.removeInLine = function(row, startColumn, endColumn) {
    if (startColumn == endColumn)
        return;
    var range = new Range(row, startColumn, row, endColumn);
    var line = this.getLine(row);
    var removed = line.substring(startColumn, endColumn);
    var newLine = line.substring(0, startColumn) + line.substring(endColumn, line.length);
    this.$lines.splice(row, 1, newLine);
    var delta = {
        action: "removeText",
        range: range,
        text: removed
    };
    this._signal("change", { data: delta });
    return range.start;
};
// Public entry point for deleting whole rows.  Out-of-bounds requests
// are routed through remove() (which clips); in-bounds ones go straight
// to _removeLines.
this.removeLines = function(firstRow, lastRow) {
    if (firstRow < 0 || lastRow >= this.getLength())
        return this.remove(new Range(firstRow, 0, lastRow + 1, 0));
    return this._removeLines(firstRow, lastRow);
};
// Splices rows firstRow..lastRow (inclusive) out of $lines and emits a
// "removeLines" delta.  Returns the removed rows.
this._removeLines = function(firstRow, lastRow) {
    var range = new Range(firstRow, 0, lastRow + 1, 0);
    var removed = this.$lines.splice(firstRow, lastRow - firstRow + 1);
    var delta = {
        action: "removeLines",
        range: range,
        nl: this.getNewLineCharacter(),
        lines: removed
    };
    this._signal("change", { data: delta });
    return removed;
};
// Joins row and row+1 into a single row, removing the newline between
// them.  (Definition continues on the next chunk line.)
this.removeNewLine = function(row) {
    var firstLine = this.getLine(row);
    var secondLine = this.getLine(row+1);
    var range = new Range(row, firstLine.length, row+1,
// --- tail of Document.removeNewLine: finish building the range and
// perform the join of row and row+1 ---
0);
var line = firstLine + secondLine;
this.$lines.splice(row, 2, line);
var delta = {
    action: "removeText",
    range: range,
    text: this.getNewLineCharacter()
};
this._signal("change", { data: delta });
};
// Replaces the text in `range` with `text`.  No-ops when both are empty
// or when the text already matches; otherwise remove() then insert().
// Returns the end position of the inserted text (or range.start when
// text is empty).
this.replace = function(range, text) {
    if (!(range instanceof Range))
        range = Range.fromPoints(range.start, range.end);
    if (text.length == 0 && range.isEmpty())
        return range.start;
    if (text == this.getTextRange(range))
        return range.end;
    this.remove(range);
    if (text) {
        var end = this.insert(range.start, text);
    } else {
        end = range.start;
    }
    return end;
};
// Applies a list of change deltas (as emitted on "change" events) to
// this document, in order.
this.applyDeltas = function(deltas) {
    for (var i=0; i<deltas.length; i++) {
        var delta = deltas[i];
        var range = Range.fromPoints(delta.range.start, delta.range.end);
        if (delta.action == "insertLines")
            this.insertLines(range.start.row, delta.lines);
        else if (delta.action == "insertText")
            this.insert(range.start, delta.text);
        else if (delta.action == "removeLines")
            this._removeLines(range.start.row, range.end.row - 1);
        else if (delta.action == "removeText")
            this.remove(range);
    }
};
// Undoes a list of deltas by applying the inverse of each, iterating in
// reverse order.
this.revertDeltas = function(deltas) {
    for (var i=deltas.length-1; i>=0; i--) {
        var delta = deltas[i];
        var range = Range.fromPoints(delta.range.start, delta.range.end);
        if (delta.action == "insertLines")
            this._removeLines(range.start.row, range.end.row - 1);
        else if (delta.action == "insertText")
            this.remove(range);
        else if (delta.action == "removeLines")
            this._insertLines(range.start.row, delta.lines);
        else if (delta.action == "removeText")
            this.insert(range.start, delta.text);
    }
};
// Converts a character offset (each row costs its length plus the
// newline length) into a {row, column} position.  `startRow` lets the
// caller skip rows already accounted for in `index`.
this.indexToPosition = function(index, startRow) {
    var lines = this.$lines || this.getAllLines();
    var newlineLength = this.getNewLineCharacter().length;
    for (var i = startRow || 0, l = lines.length; i < l; i++) {
        index -= lines[i].length + newlineLength;
        if (index < 0)
            return {row: i, column: index + lines[i].length + newlineLength};
    }
    // Offset past the end: clamp to the last position in the document.
    return {row: l-1, column: lines[l-1].length};
};
// Inverse of indexToPosition.  (Definition continues on the next chunk
// line.)
this.positionToIndex = function(pos, startRow) {
    var lines = this.$lines ||
// --- tail of Document.positionToIndex: sum the lengths (plus newline)
// of every row above pos.row, then add pos.column ---
this.getAllLines();
var newlineLength = this.getNewLineCharacter().length;
var index = 0;
var row = Math.min(pos.row, lines.length);
for (var i = startRow || 0; i < row; ++i)
    index += lines[i].length + newlineLength;
return index + pos.column;
};
}).call(Document.prototype);
exports.Document = Document;
});
// ===================================================================
// ace/worker/mirror: keeps a worker-side Document in sync with the main
// thread by applying the change deltas forwarded by `sender`.
// ===================================================================
define("ace/worker/mirror",["require","exports","module","ace/document","ace/lib/lang"], function(require, exports, module) {
"use strict";
var Document = require("../document").Document;
var lang = require("../lib/lang");
// Mirror wires a sender (worker message channel) to a fresh Document
// and runs onUpdate() after edits, deferred via lang.delayedCall
// (presumably a debounce helper — TODO confirm semantics).
var Mirror = exports.Mirror = function(sender) {
    this.sender = sender;
    var doc = this.doc = new Document("");
    var deferredUpdate = this.deferredUpdate = lang.delayedCall(this.onUpdate.bind(this));
    var _self = this;
    sender.on("change", function(e) {
        doc.applyDeltas(e.data);
        // A falsy $timeout means: update synchronously, no deferral.
        if (_self.$timeout)
            return deferredUpdate.schedule(_self.$timeout);
        _self.onUpdate();
    });
};
(function() {
    // Delay (ms) between the last applied edit and the onUpdate() pass.
    this.$timeout = 500;
    this.setTimeout = function(timeout) {
        this.$timeout = timeout;
    };
    // Replaces the whole document text and schedules an update pass.
    this.setValue = function(value) {
        this.doc.setValue(value);
        this.deferredUpdate.schedule(this.$timeout);
    };
    // Sends the current document text back through the sender, tagged
    // with the caller-supplied callback id.
    this.getValue = function(callbackId) {
        this.sender.callback(this.doc.getValue(), callbackId);
    };
    // Hook for subclasses (e.g. lint workers) — intentionally empty.
    this.onUpdate = function() {
    };
    this.isPending = function() {
        return this.deferredUpdate.isPending();
    };
}).call(Mirror.prototype);
});
// ===================================================================
// ace/mode/css/csslint: bundled (vendored) copy of CSSLint and its
// parserlib dependency.  The module continues past the end of this
// chunk.
// ===================================================================
define("ace/mode/css/csslint",["require","exports","module"], function(require, exports, module) {
var parserlib = {};
(function(){
// Minimal observer/event emitter used by the CSS tokenizer and parser.
function EventTarget(){
    this._listeners = {};
}
EventTarget.prototype = {
    constructor: EventTarget,
    addListener: function(type, listener){
        if (!this._listeners[type]){
            this._listeners[type] = [];
        }
        this._listeners[type].push(listener);
    },
    fire: function(event){
        // A bare string is promoted to an event object of that type.
        if (typeof event == "string"){
            event = { type: event };
        }
        // NOTE(review): this assigns event.target only when a target is
        // already defined, which looks inverted — it matches the
        // vendored upstream parserlib source, so it is left as-is.
        if (typeof event.target != "undefined"){
            event.target = this;
        }
        if (typeof event.type == "undefined"){
            throw new Error("Event object missing 'type' property.");
        }
        if (this._listeners[event.type]){
            var
listeners = this._listeners[event.type].concat(); for (var i=0, len=listeners.length; i < len; i++){ listeners[i].call(this, event); } } }, removeListener: function(type, listener){ if (this._listeners[type]){ var listeners = this._listeners[type]; for (var i=0, len=listeners.length; i < len; i++){ if (listeners[i] === listener){ listeners.splice(i, 1); break; } } } } }; function StringReader(text){ this._input = text.replace(/\n\r?/g, "\n"); this._line = 1; this._col = 1; this._cursor = 0; } StringReader.prototype = { constructor: StringReader, getCol: function(){ return this._col; }, getLine: function(){ return this._line ; }, eof: function(){ return (this._cursor == this._input.length); }, peek: function(count){ var c = null; count = (typeof count == "undefined" ? 1 : count); if (this._cursor < this._input.length){ c = this._input.charAt(this._cursor + count - 1); } return c; }, read: function(){ var c = null; if (this._cursor < this._input.length){ if (this._input.charAt(this._cursor) == "\n"){ this._line++; this._col=1; } else { this._col++; } c = this._input.charAt(this._cursor++); } return c; }, mark: function(){ this._bookmark = { cursor: this._cursor, line: this._line, col: this._col }; }, reset: function(){ if (this._bookmark){ this._cursor = this._bookmark.cursor; this._line = this._bookmark.line; this._col = this._bookmark.col; delete this._bookmark; } }, readTo: function(pattern){ var buffer = "", c; while (buffer.length < pattern.length || buffer.lastIndexOf(pattern) != buffer.length - pattern.length){ c = this.read(); if (c){ buffer += c; } else { throw new Error("Expected \"" + pattern + "\" at line " + this._line + ", col " + this._col + "."); } } return buffer; }, readWhile: function(filter){ var buffer = "", c = this.read(); while(c !== null && filter(c)){ buffer += c; c = this.read(); } return buffer; }, readMatch: function(matcher){ var source = this._input.substring(this._cursor), value = null; if (typeof matcher == "string"){ if 
(source.indexOf(matcher) === 0){ value = this.readCount(matcher.length); } } else if (matcher instanceof RegExp){ if (matcher.test(source)){ value = this.readCount(RegExp.lastMatch.length); } } return value; }, readCount: function(count){ var buffer = ""; while(count--){ buffer += this.read(); } return buffer; } }; function SyntaxError(message, line, col){ this.col = col; this.line = line; this.message = message; } SyntaxError.prototype = new Error(); function SyntaxUnit(text, line, col, type){ this.col = col; this.line = line; this.text = text; this.type = type; } SyntaxUnit.fromToken = function(token){ return new SyntaxUnit(token.value, token.startLine, token.startCol); }; SyntaxUnit.prototype = { constructor: SyntaxUnit, valueOf: function(){ return this.text; }, toString: function(){ return this.text; } }; function TokenStreamBase(input, tokenData){ this._reader = input ? new StringReader(input.toString()) : null; this._token = null; this._tokenData = tokenData; this._lt = []; this._ltIndex = 0; this._ltIndexCache = []; } TokenStreamBase.createTokenData = function(tokens){ var nameMap = [], typeMap = {}, tokenData = tokens.concat([]), i = 0, len = tokenData.length+1; tokenData.UNKNOWN = -1; tokenData.unshift({name:"EOF"}); for (; i < len; i++){ nameMap.push(tokenData[i].name); tokenData[tokenData[i].name] = i; if (tokenData[i].text){ typeMap[tokenData[i].text] = i; } } tokenData.name = function(tt){ return nameMap[tt]; }; tokenData.type = function(c){ return typeMap[c]; }; return tokenData; }; TokenStreamBase.prototype = { constructor: TokenStreamBase, match: function(tokenTypes, channel){ if (!(tokenTypes instanceof Array)){ tokenTypes = [tokenTypes]; } var tt = this.get(channel), i = 0, len = tokenTypes.length; while(i < len){ if (tt == tokenTypes[i++]){ return true; } } this.unget(); return false; }, mustMatch: function(tokenTypes, channel){ var token; if (!(tokenTypes instanceof Array)){ tokenTypes = [tokenTypes]; } if (!this.match.apply(this, arguments)){ 
token = this.LT(1); throw new SyntaxError("Expected " + this._tokenData[tokenTypes[0]].name + " at line " + token.startLine + ", col " + token.startCol + ".", token.startLine, token.startCol); } }, advance: function(tokenTypes, channel){ while(this.LA(0) !== 0 && !this.match(tokenTypes, channel)){ this.get(); } return this.LA(0); }, get: function(channel){ var tokenInfo = this._tokenData, reader = this._reader, value, i =0, len = tokenInfo.length, found = false, token, info; if (this._lt.length && this._ltIndex >= 0 && this._ltIndex < this._lt.length){ i++; this._token = this._lt[this._ltIndex++]; info = tokenInfo[this._token.type]; while((info.channel !== undefined && channel !== info.channel) && this._ltIndex < this._lt.length){ this._token = this._lt[this._ltIndex++]; info = tokenInfo[this._token.type]; i++; } if ((info.channel === undefined || channel === info.channel) && this._ltIndex <= this._lt.length){ this._ltIndexCache.push(i); return this._token.type; } } token = this._getToken(); if (token.type > -1 && !tokenInfo[token.type].hide){ token.channel = tokenInfo[token.type].channel; this._token = token; this._lt.push(token); this._ltIndexCache.push(this._lt.length - this._ltIndex + i); if (this._lt.length > 5){ this._lt.shift(); } if (this._ltIndexCache.length > 5){ this._ltIndexCache.shift(); } this._ltIndex = this._lt.length; } info = tokenInfo[token.type]; if (info && (info.hide || (info.channel !== undefined && channel !== info.channel))){ return this.get(channel); } else { return token.type; } }, LA: function(index){ var total = index, tt; if (index > 0){ if (index > 5){ throw new Error("Too much lookahead."); } while(total){ tt = this.get(); total--; } while(total < index){ this.unget(); total++; } } else if (index < 0){ if(this._lt[this._ltIndex+index]){ tt = this._lt[this._ltIndex+index].type; } else { throw new Error("Too much lookbehind."); } } else { tt = this._token.type; } return tt; }, LT: function(index){ this.LA(index); return 
this._lt[this._ltIndex+index-1]; }, peek: function(){ return this.LA(1); }, token: function(){ return this._token; }, tokenName: function(tokenType){ if (tokenType < 0 || tokenType > this._tokenData.length){ return "UNKNOWN_TOKEN"; } else { return this._tokenData[tokenType].name; } }, tokenType: function(tokenName){ return this._tokenData[tokenName] || -1; }, unget: function(){ if (this._ltIndexCache.length){ this._ltIndex -= this._ltIndexCache.pop();//--; this._token = this._lt[this._ltIndex - 1]; } else { throw new Error("Too much lookahead."); } } }; parserlib.util = { StringReader: StringReader, SyntaxError : SyntaxError, SyntaxUnit : SyntaxUnit, EventTarget : EventTarget, TokenStreamBase : TokenStreamBase }; })(); (function(){ var EventTarget = parserlib.util.EventTarget, TokenStreamBase = parserlib.util.TokenStreamBase, StringReader = parserlib.util.StringReader, SyntaxError = parserlib.util.SyntaxError, SyntaxUnit = parserlib.util.SyntaxUnit; var Colors = { aliceblue :"#f0f8ff", antiquewhite :"#faebd7", aqua :"#00ffff", aquamarine :"#7fffd4", azure :"#f0ffff", beige :"#f5f5dc", bisque :"#ffe4c4", black :"#000000", blanchedalmond :"#ffebcd", blue :"#0000ff", blueviolet :"#8a2be2", brown :"#a52a2a", burlywood :"#deb887", cadetblue :"#5f9ea0", chartreuse :"#7fff00", chocolate :"#d2691e", coral :"#ff7f50", cornflowerblue :"#6495ed", cornsilk :"#fff8dc", crimson :"#dc143c", cyan :"#00ffff", darkblue :"#00008b", darkcyan :"#008b8b", darkgoldenrod :"#b8860b", darkgray :"#a9a9a9", darkgrey :"#a9a9a9", darkgreen :"#006400", darkkhaki :"#bdb76b", darkmagenta :"#8b008b", darkolivegreen :"#556b2f", darkorange :"#ff8c00", darkorchid :"#9932cc", darkred :"#8b0000", darksalmon :"#e9967a", darkseagreen :"#8fbc8f", darkslateblue :"#483d8b", darkslategray :"#2f4f4f", darkslategrey :"#2f4f4f", darkturquoise :"#00ced1", darkviolet :"#9400d3", deeppink :"#ff1493", deepskyblue :"#00bfff", dimgray :"#696969", dimgrey :"#696969", dodgerblue :"#1e90ff", firebrick :"#b22222", 
floralwhite :"#fffaf0", forestgreen :"#228b22", fuchsia :"#ff00ff", gainsboro :"#dcdcdc", ghostwhite :"#f8f8ff", gold :"#ffd700", goldenrod :"#daa520", gray :"#808080", grey :"#808080", green :"#008000", greenyellow :"#adff2f", honeydew :"#f0fff0", hotpink :"#ff69b4", indianred :"#cd5c5c", indigo :"#4b0082", ivory :"#fffff0", khaki :"#f0e68c", lavender :"#e6e6fa", lavenderblush :"#fff0f5", lawngreen :"#7cfc00", lemonchiffon :"#fffacd", lightblue :"#add8e6", lightcoral :"#f08080", lightcyan :"#e0ffff", lightgoldenrodyellow :"#fafad2", lightgray :"#d3d3d3", lightgrey :"#d3d3d3", lightgreen :"#90ee90", lightpink :"#ffb6c1", lightsalmon :"#ffa07a", lightseagreen :"#20b2aa", lightskyblue :"#87cefa", lightslategray :"#778899", lightslategrey :"#778899", lightsteelblue :"#b0c4de", lightyellow :"#ffffe0", lime :"#00ff00", limegreen :"#32cd32", linen :"#faf0e6", magenta :"#ff00ff", maroon :"#800000", mediumaquamarine:"#66cdaa", mediumblue :"#0000cd", mediumorchid :"#ba55d3", mediumpurple :"#9370d8", mediumseagreen :"#3cb371", mediumslateblue :"#7b68ee", mediumspringgreen :"#00fa9a", mediumturquoise :"#48d1cc", mediumvioletred :"#c71585", midnightblue :"#191970", mintcream :"#f5fffa", mistyrose :"#ffe4e1", moccasin :"#ffe4b5", navajowhite :"#ffdead", navy :"#000080", oldlace :"#fdf5e6", olive :"#808000", olivedrab :"#6b8e23", orange :"#ffa500", orangered :"#ff4500", orchid :"#da70d6", palegoldenrod :"#eee8aa", palegreen :"#98fb98", paleturquoise :"#afeeee", palevioletred :"#d87093", papayawhip :"#ffefd5", peachpuff :"#ffdab9", peru :"#cd853f", pink :"#ffc0cb", plum :"#dda0dd", powderblue :"#b0e0e6", purple :"#800080", red :"#ff0000", rosybrown :"#bc8f8f", royalblue :"#4169e1", saddlebrown :"#8b4513", salmon :"#fa8072", sandybrown :"#f4a460", seagreen :"#2e8b57", seashell :"#fff5ee", sienna :"#a0522d", silver :"#c0c0c0", skyblue :"#87ceeb", slateblue :"#6a5acd", slategray :"#708090", slategrey :"#708090", snow :"#fffafa", springgreen :"#00ff7f", steelblue :"#4682b4", tan 
:"#d2b48c", teal :"#008080", thistle :"#d8bfd8", tomato :"#ff6347", turquoise :"#40e0d0", violet :"#ee82ee", wheat :"#f5deb3", white :"#ffffff", whitesmoke :"#f5f5f5", yellow :"#ffff00", yellowgreen :"#9acd32", activeBorder :"Active window border.", activecaption :"Active window caption.", appworkspace :"Background color of multiple document interface.", background :"Desktop background.", buttonface :"The face background color for 3-D elements that appear 3-D due to one layer of surrounding border.", buttonhighlight :"The color of the border facing the light source for 3-D elements that appear 3-D due to one layer of surrounding border.", buttonshadow :"The color of the border away from the light source for 3-D elements that appear 3-D due to one layer of surrounding border.", buttontext :"Text on push buttons.", captiontext :"Text in caption, size box, and scrollbar arrow box.", graytext :"Grayed (disabled) text. This color is set to #000 if the current display driver does not support a solid gray color.", greytext :"Greyed (disabled) text. 
This color is set to #000 if the current display driver does not support a solid grey color.", highlight :"Item(s) selected in a control.", highlighttext :"Text of item(s) selected in a control.", inactiveborder :"Inactive window border.", inactivecaption :"Inactive window caption.", inactivecaptiontext :"Color of text in an inactive caption.", infobackground :"Background color for tooltip controls.", infotext :"Text color for tooltip controls.", menu :"Menu background.", menutext :"Text in menus.", scrollbar :"Scroll bar gray area.", threeddarkshadow :"The color of the darker (generally outer) of the two borders away from the light source for 3-D elements that appear 3-D due to two concentric layers of surrounding border.", threedface :"The face background color for 3-D elements that appear 3-D due to two concentric layers of surrounding border.", threedhighlight :"The color of the lighter (generally outer) of the two borders facing the light source for 3-D elements that appear 3-D due to two concentric layers of surrounding border.", threedlightshadow :"The color of the darker (generally inner) of the two borders facing the light source for 3-D elements that appear 3-D due to two concentric layers of surrounding border.", threedshadow :"The color of the lighter (generally inner) of the two borders away from the light source for 3-D elements that appear 3-D due to two concentric layers of surrounding border.", window :"Window background.", windowframe :"Window frame.", windowtext :"Text in windows." 
}; function Combinator(text, line, col){ SyntaxUnit.call(this, text, line, col, Parser.COMBINATOR_TYPE); this.type = "unknown"; if (/^\s+$/.test(text)){ this.type = "descendant"; } else if (text == ">"){ this.type = "child"; } else if (text == "+"){ this.type = "adjacent-sibling"; } else if (text == "~"){ this.type = "sibling"; } } Combinator.prototype = new SyntaxUnit(); Combinator.prototype.constructor = Combinator; function MediaFeature(name, value){ SyntaxUnit.call(this, "(" + name + (value !== null ? ":" + value : "") + ")", name.startLine, name.startCol, Parser.MEDIA_FEATURE_TYPE); this.name = name; this.value = value; } MediaFeature.prototype = new SyntaxUnit(); MediaFeature.prototype.constructor = MediaFeature; function MediaQuery(modifier, mediaType, features, line, col){ SyntaxUnit.call(this, (modifier ? modifier + " ": "") + (mediaType ? mediaType : "") + (mediaType && features.length > 0 ? " and " : "") + features.join(" and "), line, col, Parser.MEDIA_QUERY_TYPE); this.modifier = modifier; this.mediaType = mediaType; this.features = features; } MediaQuery.prototype = new SyntaxUnit(); MediaQuery.prototype.constructor = MediaQuery; function Parser(options){ EventTarget.call(this); this.options = options || {}; this._tokenStream = null; } Parser.DEFAULT_TYPE = 0; Parser.COMBINATOR_TYPE = 1; Parser.MEDIA_FEATURE_TYPE = 2; Parser.MEDIA_QUERY_TYPE = 3; Parser.PROPERTY_NAME_TYPE = 4; Parser.PROPERTY_VALUE_TYPE = 5; Parser.PROPERTY_VALUE_PART_TYPE = 6; Parser.SELECTOR_TYPE = 7; Parser.SELECTOR_PART_TYPE = 8; Parser.SELECTOR_SUB_PART_TYPE = 9; Parser.prototype = function(){ var proto = new EventTarget(), //new prototype prop, additions = { constructor: Parser, DEFAULT_TYPE : 0, COMBINATOR_TYPE : 1, MEDIA_FEATURE_TYPE : 2, MEDIA_QUERY_TYPE : 3, PROPERTY_NAME_TYPE : 4, PROPERTY_VALUE_TYPE : 5, PROPERTY_VALUE_PART_TYPE : 6, SELECTOR_TYPE : 7, SELECTOR_PART_TYPE : 8, SELECTOR_SUB_PART_TYPE : 9, _stylesheet: function(){ var tokenStream = this._tokenStream, charset 
= null, count, token, tt; this.fire("startstylesheet"); this._charset(); this._skipCruft(); while (tokenStream.peek() == Tokens.IMPORT_SYM){ this._import(); this._skipCruft(); } while (tokenStream.peek() == Tokens.NAMESPACE_SYM){ this._namespace(); this._skipCruft(); } tt = tokenStream.peek(); while(tt > Tokens.EOF){ try { switch(tt){ case Tokens.MEDIA_SYM: this._media(); this._skipCruft(); break; case Tokens.PAGE_SYM: this._page(); this._skipCruft(); break; case Tokens.FONT_FACE_SYM: this._font_face(); this._skipCruft(); break; case Tokens.KEYFRAMES_SYM: this._keyframes(); this._skipCruft(); break; case Tokens.VIEWPORT_SYM: this._viewport(); this._skipCruft(); break; case Tokens.UNKNOWN_SYM: //unknown @ rule tokenStream.get(); if (!this.options.strict){ this.fire({ type: "error", error: null, message: "Unknown @ rule: " + tokenStream.LT(0).value + ".", line: tokenStream.LT(0).startLine, col: tokenStream.LT(0).startCol }); count=0; while (tokenStream.advance([Tokens.LBRACE, Tokens.RBRACE]) == Tokens.LBRACE){ count++; //keep track of nesting depth } while(count){ tokenStream.advance([Tokens.RBRACE]); count--; } } else { throw new SyntaxError("Unknown @ rule.", tokenStream.LT(0).startLine, tokenStream.LT(0).startCol); } break; case Tokens.S: this._readWhitespace(); break; default: if(!this._ruleset()){ switch(tt){ case Tokens.CHARSET_SYM: token = tokenStream.LT(1); this._charset(false); throw new SyntaxError("@charset not allowed here.", token.startLine, token.startCol); case Tokens.IMPORT_SYM: token = tokenStream.LT(1); this._import(false); throw new SyntaxError("@import not allowed here.", token.startLine, token.startCol); case Tokens.NAMESPACE_SYM: token = tokenStream.LT(1); this._namespace(false); throw new SyntaxError("@namespace not allowed here.", token.startLine, token.startCol); default: tokenStream.get(); //get the last token this._unexpectedToken(tokenStream.token()); } } } } catch(ex) { if (ex instanceof SyntaxError && !this.options.strict){ this.fire({ 
type: "error", error: ex, message: ex.message, line: ex.line, col: ex.col }); } else { throw ex; } } tt = tokenStream.peek(); } if (tt != Tokens.EOF){ this._unexpectedToken(tokenStream.token()); } this.fire("endstylesheet"); }, _charset: function(emit){ var tokenStream = this._tokenStream, charset, token, line, col; if (tokenStream.match(Tokens.CHARSET_SYM)){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); tokenStream.mustMatch(Tokens.STRING); token = tokenStream.token(); charset = token.value; this._readWhitespace(); tokenStream.mustMatch(Tokens.SEMICOLON); if (emit !== false){ this.fire({ type: "charset", charset:charset, line: line, col: col }); } } }, _import: function(emit){ var tokenStream = this._tokenStream, tt, uri, importToken, mediaList = []; tokenStream.mustMatch(Tokens.IMPORT_SYM); importToken = tokenStream.token(); this._readWhitespace(); tokenStream.mustMatch([Tokens.STRING, Tokens.URI]); uri = tokenStream.token().value.replace(/^(?:url\()?["']?([^"']+?)["']?\)?$/, "$1"); this._readWhitespace(); mediaList = this._media_query_list(); tokenStream.mustMatch(Tokens.SEMICOLON); this._readWhitespace(); if (emit !== false){ this.fire({ type: "import", uri: uri, media: mediaList, line: importToken.startLine, col: importToken.startCol }); } }, _namespace: function(emit){ var tokenStream = this._tokenStream, line, col, prefix, uri; tokenStream.mustMatch(Tokens.NAMESPACE_SYM); line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); if (tokenStream.match(Tokens.IDENT)){ prefix = tokenStream.token().value; this._readWhitespace(); } tokenStream.mustMatch([Tokens.STRING, Tokens.URI]); uri = tokenStream.token().value.replace(/(?:url\()?["']([^"']+)["']\)?/, "$1"); this._readWhitespace(); tokenStream.mustMatch(Tokens.SEMICOLON); this._readWhitespace(); if (emit !== false){ this.fire({ type: "namespace", prefix: prefix, uri: uri, line: line, col: col }); } }, _media: 
function(){ var tokenStream = this._tokenStream, line, col, mediaList;// = []; tokenStream.mustMatch(Tokens.MEDIA_SYM); line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); mediaList = this._media_query_list(); tokenStream.mustMatch(Tokens.LBRACE); this._readWhitespace(); this.fire({ type: "startmedia", media: mediaList, line: line, col: col }); while(true) { if (tokenStream.peek() == Tokens.PAGE_SYM){ this._page(); } else if (tokenStream.peek() == Tokens.FONT_FACE_SYM){ this._font_face(); } else if (tokenStream.peek() == Tokens.VIEWPORT_SYM){ this._viewport(); } else if (!this._ruleset()){ break; } } tokenStream.mustMatch(Tokens.RBRACE); this._readWhitespace(); this.fire({ type: "endmedia", media: mediaList, line: line, col: col }); }, _media_query_list: function(){ var tokenStream = this._tokenStream, mediaList = []; this._readWhitespace(); if (tokenStream.peek() == Tokens.IDENT || tokenStream.peek() == Tokens.LPAREN){ mediaList.push(this._media_query()); } while(tokenStream.match(Tokens.COMMA)){ this._readWhitespace(); mediaList.push(this._media_query()); } return mediaList; }, _media_query: function(){ var tokenStream = this._tokenStream, type = null, ident = null, token = null, expressions = []; if (tokenStream.match(Tokens.IDENT)){ ident = tokenStream.token().value.toLowerCase(); if (ident != "only" && ident != "not"){ tokenStream.unget(); ident = null; } else { token = tokenStream.token(); } } this._readWhitespace(); if (tokenStream.peek() == Tokens.IDENT){ type = this._media_type(); if (token === null){ token = tokenStream.token(); } } else if (tokenStream.peek() == Tokens.LPAREN){ if (token === null){ token = tokenStream.LT(1); } expressions.push(this._media_expression()); } if (type === null && expressions.length === 0){ return null; } else { this._readWhitespace(); while (tokenStream.match(Tokens.IDENT)){ if (tokenStream.token().value.toLowerCase() != "and"){ this._unexpectedToken(tokenStream.token()); } 
this._readWhitespace(); expressions.push(this._media_expression()); } } return new MediaQuery(ident, type, expressions, token.startLine, token.startCol); }, _media_type: function(){ return this._media_feature(); }, _media_expression: function(){ var tokenStream = this._tokenStream, feature = null, token, expression = null; tokenStream.mustMatch(Tokens.LPAREN); feature = this._media_feature(); this._readWhitespace(); if (tokenStream.match(Tokens.COLON)){ this._readWhitespace(); token = tokenStream.LT(1); expression = this._expression(); } tokenStream.mustMatch(Tokens.RPAREN); this._readWhitespace(); return new MediaFeature(feature, (expression ? new SyntaxUnit(expression, token.startLine, token.startCol) : null)); }, _media_feature: function(){ var tokenStream = this._tokenStream; tokenStream.mustMatch(Tokens.IDENT); return SyntaxUnit.fromToken(tokenStream.token()); }, _page: function(){ var tokenStream = this._tokenStream, line, col, identifier = null, pseudoPage = null; tokenStream.mustMatch(Tokens.PAGE_SYM); line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); if (tokenStream.match(Tokens.IDENT)){ identifier = tokenStream.token().value; if (identifier.toLowerCase() === "auto"){ this._unexpectedToken(tokenStream.token()); } } if (tokenStream.peek() == Tokens.COLON){ pseudoPage = this._pseudo_page(); } this._readWhitespace(); this.fire({ type: "startpage", id: identifier, pseudo: pseudoPage, line: line, col: col }); this._readDeclarations(true, true); this.fire({ type: "endpage", id: identifier, pseudo: pseudoPage, line: line, col: col }); }, _margin: function(){ var tokenStream = this._tokenStream, line, col, marginSym = this._margin_sym(); if (marginSym){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; this.fire({ type: "startpagemargin", margin: marginSym, line: line, col: col }); this._readDeclarations(true); this.fire({ type: "endpagemargin", margin: marginSym, line: line, col: col }); 
return true; } else { return false; } }, _margin_sym: function(){ var tokenStream = this._tokenStream; if(tokenStream.match([Tokens.TOPLEFTCORNER_SYM, Tokens.TOPLEFT_SYM, Tokens.TOPCENTER_SYM, Tokens.TOPRIGHT_SYM, Tokens.TOPRIGHTCORNER_SYM, Tokens.BOTTOMLEFTCORNER_SYM, Tokens.BOTTOMLEFT_SYM, Tokens.BOTTOMCENTER_SYM, Tokens.BOTTOMRIGHT_SYM, Tokens.BOTTOMRIGHTCORNER_SYM, Tokens.LEFTTOP_SYM, Tokens.LEFTMIDDLE_SYM, Tokens.LEFTBOTTOM_SYM, Tokens.RIGHTTOP_SYM, Tokens.RIGHTMIDDLE_SYM, Tokens.RIGHTBOTTOM_SYM])) { return SyntaxUnit.fromToken(tokenStream.token()); } else { return null; } }, _pseudo_page: function(){ var tokenStream = this._tokenStream; tokenStream.mustMatch(Tokens.COLON); tokenStream.mustMatch(Tokens.IDENT); return tokenStream.token().value; }, _font_face: function(){ var tokenStream = this._tokenStream, line, col; tokenStream.mustMatch(Tokens.FONT_FACE_SYM); line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); this.fire({ type: "startfontface", line: line, col: col }); this._readDeclarations(true); this.fire({ type: "endfontface", line: line, col: col }); }, _viewport: function(){ var tokenStream = this._tokenStream, line, col; tokenStream.mustMatch(Tokens.VIEWPORT_SYM); line = tokenStream.token().startLine; col = tokenStream.token().startCol; this._readWhitespace(); this.fire({ type: "startviewport", line: line, col: col }); this._readDeclarations(true); this.fire({ type: "endviewport", line: line, col: col }); }, _operator: function(inFunction){ var tokenStream = this._tokenStream, token = null; if (tokenStream.match([Tokens.SLASH, Tokens.COMMA]) || (inFunction && tokenStream.match([Tokens.PLUS, Tokens.STAR, Tokens.MINUS]))){ token = tokenStream.token(); this._readWhitespace(); } return token ? 
// NOTE(review): collapsed vendored CSS parser code (recursive-descent productions over a TokenStream).
// Above: _margin_sym (matches any @page margin-box at-symbol, returns a SyntaxUnit or null), _pseudo_page
// (':' IDENT -> ident value), _font_face / _viewport (fire start*/end* events around a declaration block),
// and _operator (matches '/' or ',', plus '+','*','-' when inFunction). Below, _operator's return wraps the
// matched token as a PropertyValuePart; then _combinator ('+','>','~' -> Combinator), _unary_operator ('-'/'+'),
// _property (optional '*' star hack and leading '_' underscore hack -> PropertyName), _ruleset (parses a selector
// group, recovers to '}' on SyntaxError when not strict, fires startrule/endrule around declarations), and the
// start of _selectors_group.
PropertyValuePart.fromToken(token) : null; }, _combinator: function(){ var tokenStream = this._tokenStream, value = null, token; if(tokenStream.match([Tokens.PLUS, Tokens.GREATER, Tokens.TILDE])){ token = tokenStream.token(); value = new Combinator(token.value, token.startLine, token.startCol); this._readWhitespace(); } return value; }, _unary_operator: function(){ var tokenStream = this._tokenStream; if (tokenStream.match([Tokens.MINUS, Tokens.PLUS])){ return tokenStream.token().value; } else { return null; } }, _property: function(){ var tokenStream = this._tokenStream, value = null, hack = null, tokenValue, token, line, col; if (tokenStream.peek() == Tokens.STAR && this.options.starHack){ tokenStream.get(); token = tokenStream.token(); hack = token.value; line = token.startLine; col = token.startCol; } if(tokenStream.match(Tokens.IDENT)){ token = tokenStream.token(); tokenValue = token.value; if (tokenValue.charAt(0) == "_" && this.options.underscoreHack){ hack = "_"; tokenValue = tokenValue.substring(1); } value = new PropertyName(tokenValue, hack, (line||token.startLine), (col||token.startCol)); this._readWhitespace(); } return value; }, _ruleset: function(){ var tokenStream = this._tokenStream, tt, selectors; try { selectors = this._selectors_group(); } catch (ex){ if (ex instanceof SyntaxError && !this.options.strict){ this.fire({ type: "error", error: ex, message: ex.message, line: ex.line, col: ex.col }); tt = tokenStream.advance([Tokens.RBRACE]); if (tt == Tokens.RBRACE){ } else { throw ex; } } else { throw ex; } return true; } if (selectors){ this.fire({ type: "startrule", selectors: selectors, line: selectors[0].line, col: selectors[0].col }); this._readDeclarations(true); this.fire({ type: "endrule", selectors: selectors, line: selectors[0].line, col: selectors[0].col }); } return selectors; }, _selectors_group: function(){ var tokenStream = this._tokenStream, selectors = [], selector; selector = this._selector(); if (selector !== null){ 
// _selectors_group (continued): collects comma-separated selectors, erroring on a trailing comma with no
// selector. _selector: a chain of simple-selector sequences joined either by explicit combinators or by
// whitespace (whitespace itself becomes a descendant Combinator when no explicit one follows). Then the start
// of _simple_selector_sequence, whose `components` array holds matchers for #id, .class, [attr], :pseudo,
// and :not() sub-parts.
selectors.push(selector); while(tokenStream.match(Tokens.COMMA)){ this._readWhitespace(); selector = this._selector(); if (selector !== null){ selectors.push(selector); } else { this._unexpectedToken(tokenStream.LT(1)); } } } return selectors.length ? selectors : null; }, _selector: function(){ var tokenStream = this._tokenStream, selector = [], nextSelector = null, combinator = null, ws = null; nextSelector = this._simple_selector_sequence(); if (nextSelector === null){ return null; } selector.push(nextSelector); do { combinator = this._combinator(); if (combinator !== null){ selector.push(combinator); nextSelector = this._simple_selector_sequence(); if (nextSelector === null){ this._unexpectedToken(tokenStream.LT(1)); } else { selector.push(nextSelector); } } else { if (this._readWhitespace()){ ws = new Combinator(tokenStream.token().value, tokenStream.token().startLine, tokenStream.token().startCol); combinator = this._combinator(); nextSelector = this._simple_selector_sequence(); if (nextSelector === null){ if (combinator !== null){ this._unexpectedToken(tokenStream.LT(1)); } } else { if (combinator !== null){ selector.push(combinator); } else { selector.push(ws); } selector.push(nextSelector); } } else { break; } } } while(true); return new Selector(selector, selector[0].line, selector[0].col); }, _simple_selector_sequence: function(){ var tokenStream = this._tokenStream, elementName = null, modifiers = [], selectorText= "", components = [ function(){ return tokenStream.match(Tokens.HASH) ? 
// _simple_selector_sequence (continued): optional type or universal element name, then modifier components
// accumulated until whitespace; returns a SelectorPart or null when nothing matched. _type_selector: optional
// namespace prefix plus element name, ungetting consumed prefix tokens when no element name follows. Then the
// start of _class ('.' IDENT -> SelectorSubPart of type "class").
new SelectorSubPart(tokenStream.token().value, "id", tokenStream.token().startLine, tokenStream.token().startCol) : null; }, this._class, this._attrib, this._pseudo, this._negation ], i = 0, len = components.length, component = null, found = false, line, col; line = tokenStream.LT(1).startLine; col = tokenStream.LT(1).startCol; elementName = this._type_selector(); if (!elementName){ elementName = this._universal(); } if (elementName !== null){ selectorText += elementName; } while(true){ if (tokenStream.peek() === Tokens.S){ break; } while(i < len && component === null){ component = components[i++].call(this); } if (component === null){ if (selectorText === ""){ return null; } else { break; } } else { i = 0; modifiers.push(component); selectorText += component.toString(); component = null; } } return selectorText !== "" ? new SelectorPart(elementName, modifiers, selectorText, line, col) : null; }, _type_selector: function(){ var tokenStream = this._tokenStream, ns = this._namespace_prefix(), elementName = this._element_name(); if (!elementName){ if (ns){ tokenStream.unget(); if (ns.length > 1){ tokenStream.unget(); } } return null; } else { if (ns){ elementName.text = ns + elementName.text; elementName.col -= ns.length; } return elementName; } }, _class: function(){ var tokenStream = this._tokenStream, token; if (tokenStream.match(Tokens.DOT)){ tokenStream.mustMatch(Tokens.IDENT); token = tokenStream.token(); return new SelectorSubPart("." 
// NOTE(review): collapsed vendored CSS parser code — selector sub-part productions. This chunk finishes
// _class, then defines _element_name (IDENT -> "elementName" SelectorSubPart), _namespace_prefix
// ((IDENT|'*')? '|' -> prefix string, guarded by a one/two-token lookahead for PIPE), _universal
// (prefix? '*'), _attrib (full '[...]' attribute selector text, including the optional match operator and
// IDENT/STRING operand), and the start of _pseudo (':'/'::' then ident or functional pseudo).
+ token.value, "class", token.startLine, token.startCol - 1); } else { return null; } }, _element_name: function(){ var tokenStream = this._tokenStream, token; if (tokenStream.match(Tokens.IDENT)){ token = tokenStream.token(); return new SelectorSubPart(token.value, "elementName", token.startLine, token.startCol); } else { return null; } }, _namespace_prefix: function(){ var tokenStream = this._tokenStream, value = ""; if (tokenStream.LA(1) === Tokens.PIPE || tokenStream.LA(2) === Tokens.PIPE){ if(tokenStream.match([Tokens.IDENT, Tokens.STAR])){ value += tokenStream.token().value; } tokenStream.mustMatch(Tokens.PIPE); value += "|"; } return value.length ? value : null; }, _universal: function(){ var tokenStream = this._tokenStream, value = "", ns; ns = this._namespace_prefix(); if(ns){ value += ns; } if(tokenStream.match(Tokens.STAR)){ value += "*"; } return value.length ? value : null; }, _attrib: function(){ var tokenStream = this._tokenStream, value = null, ns, token; if (tokenStream.match(Tokens.LBRACKET)){ token = tokenStream.token(); value = token.value; value += this._readWhitespace(); ns = this._namespace_prefix(); if (ns){ value += ns; } tokenStream.mustMatch(Tokens.IDENT); value += tokenStream.token().value; value += this._readWhitespace(); if(tokenStream.match([Tokens.PREFIXMATCH, Tokens.SUFFIXMATCH, Tokens.SUBSTRINGMATCH, Tokens.EQUALS, Tokens.INCLUDES, Tokens.DASHMATCH])){ value += tokenStream.token().value; value += this._readWhitespace(); tokenStream.mustMatch([Tokens.IDENT, Tokens.STRING]); value += tokenStream.token().value; value += this._readWhitespace(); } tokenStream.mustMatch(Tokens.RBRACKET); return new SelectorSubPart(value + "]", "attribute", token.startLine, token.startCol); } else { return null; } }, _pseudo: function(){ var tokenStream = this._tokenStream, pseudo = null, colons = ":", line, col; if (tokenStream.match(Tokens.COLON)){ if (tokenStream.match(Tokens.COLON)){ colons += ":"; } if (tokenStream.match(Tokens.IDENT)){ pseudo = 
// _pseudo (continued): column is adjusted back by the number of colons so the sub-part starts at the ':'.
// Then _functional_pseudo (FUNCTION token, raw expression text, mandatory ')'), _expression (concatenated run
// of value tokens used inside functional pseudos), _negation (':not(' arg ')' -> "not" SelectorSubPart with
// the argument pushed onto .args), and the start of _negation_arg.
tokenStream.token().value; line = tokenStream.token().startLine; col = tokenStream.token().startCol - colons.length; } else if (tokenStream.peek() == Tokens.FUNCTION){ line = tokenStream.LT(1).startLine; col = tokenStream.LT(1).startCol - colons.length; pseudo = this._functional_pseudo(); } if (pseudo){ pseudo = new SelectorSubPart(colons + pseudo, "pseudo", line, col); } } return pseudo; }, _functional_pseudo: function(){ var tokenStream = this._tokenStream, value = null; if(tokenStream.match(Tokens.FUNCTION)){ value = tokenStream.token().value; value += this._readWhitespace(); value += this._expression(); tokenStream.mustMatch(Tokens.RPAREN); value += ")"; } return value; }, _expression: function(){ var tokenStream = this._tokenStream, value = ""; while(tokenStream.match([Tokens.PLUS, Tokens.MINUS, Tokens.DIMENSION, Tokens.NUMBER, Tokens.STRING, Tokens.IDENT, Tokens.LENGTH, Tokens.FREQ, Tokens.ANGLE, Tokens.TIME, Tokens.RESOLUTION, Tokens.SLASH])){ value += tokenStream.token().value; value += this._readWhitespace(); } return value.length ? value : null; }, _negation: function(){ var tokenStream = this._tokenStream, line, col, value = "", arg, subpart = null; if (tokenStream.match(Tokens.NOT)){ value = tokenStream.token().value; line = tokenStream.token().startLine; col = tokenStream.token().startCol; value += this._readWhitespace(); arg = this._negation_arg(); value += arg; value += this._readWhitespace(); tokenStream.match(Tokens.RPAREN); value += tokenStream.token().value; subpart = new SelectorSubPart(value, "not", line, col); subpart.args.push(arg); } return subpart; }, _negation_arg: function(){ var tokenStream = this._tokenStream, args = [ this._type_selector, this._universal, function(){ return tokenStream.match(Tokens.HASH) ? 
// _negation_arg (continued): tries each matcher in order until one succeeds; exactly one simple-selector
// component is accepted and wrapped as a SelectorPart (as the element name when arg.type == "elementName",
// otherwise as a lone modifier). Then _declaration (property ':' value with optional !important; validation
// failures are caught and reported via the "property" event's `invalid` field rather than thrown), _prio
// (IMPORTANT_SYM), and the start of _expr (term (operator? term)*).
new SelectorSubPart(tokenStream.token().value, "id", tokenStream.token().startLine, tokenStream.token().startCol) : null; }, this._class, this._attrib, this._pseudo ], arg = null, i = 0, len = args.length, elementName, line, col, part; line = tokenStream.LT(1).startLine; col = tokenStream.LT(1).startCol; while(i < len && arg === null){ arg = args[i].call(this); i++; } if (arg === null){ this._unexpectedToken(tokenStream.LT(1)); } if (arg.type == "elementName"){ part = new SelectorPart(arg, [], arg.toString(), line, col); } else { part = new SelectorPart(null, [arg], arg.toString(), line, col); } return part; }, _declaration: function(){ var tokenStream = this._tokenStream, property = null, expr = null, prio = null, error = null, invalid = null, propertyName= ""; property = this._property(); if (property !== null){ tokenStream.mustMatch(Tokens.COLON); this._readWhitespace(); expr = this._expr(); if (!expr || expr.length === 0){ this._unexpectedToken(tokenStream.LT(1)); } prio = this._prio(); propertyName = property.toString(); if (this.options.starHack && property.hack == "*" || this.options.underscoreHack && property.hack == "_") { propertyName = property.text; } try { this._validateProperty(propertyName, expr); } catch (ex) { invalid = ex; } this.fire({ type: "property", property: property, value: expr, important: prio, line: property.line, col: property.col, invalid: invalid }); return true; } else { return false; } }, _prio: function(){ var tokenStream = this._tokenStream, result = tokenStream.match(Tokens.IMPORTANT_SYM); this._readWhitespace(); return result; }, _expr: function(inFunction){ var tokenStream = this._tokenStream, values = [], value = null, operator = null; value = this._term(inFunction); if (value !== null){ values.push(value); do { operator = this._operator(inFunction); if (operator){ values.push(operator); } /*else { values.push(new PropertyValue(valueParts, valueParts[0].line, valueParts[0].col)); valueParts = []; }*/ value = 
// _expr (continued): loops term/operator pairs into a PropertyValue. _term: optional unary sign, then one of:
// an IE filter function (when options.ieFilters), a bracketed sub-expression (only inFunction; closed via
// Tokens.type(endChar)), a plain value token (number/percentage/length/.../URI/unicode-range), a hex color,
// or a function call (LA(3) == EQUALS routes to _ie_function when ieFilters is enabled).
this._term(inFunction); if (value === null){ break; } else { values.push(value); } } while(true); } return values.length > 0 ? new PropertyValue(values, values[0].line, values[0].col) : null; }, _term: function(inFunction){ var tokenStream = this._tokenStream, unary = null, value = null, endChar = null, token, line, col; unary = this._unary_operator(); if (unary !== null){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; } if (tokenStream.peek() == Tokens.IE_FUNCTION && this.options.ieFilters){ value = this._ie_function(); if (unary === null){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; } } else if (inFunction && tokenStream.match([Tokens.LPAREN, Tokens.LBRACE, Tokens.LBRACKET])){ token = tokenStream.token(); endChar = token.endChar; value = token.value + this._expr(inFunction).text; if (unary === null){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; } tokenStream.mustMatch(Tokens.type(endChar)); value += endChar; this._readWhitespace(); } else if (tokenStream.match([Tokens.NUMBER, Tokens.PERCENTAGE, Tokens.LENGTH, Tokens.ANGLE, Tokens.TIME, Tokens.FREQ, Tokens.STRING, Tokens.IDENT, Tokens.URI, Tokens.UNICODE_RANGE])){ value = tokenStream.token().value; if (unary === null){ line = tokenStream.token().startLine; col = tokenStream.token().startCol; } this._readWhitespace(); } else { token = this._hexcolor(); if (token === null){ if (unary === null){ line = tokenStream.LT(1).startLine; col = tokenStream.LT(1).startCol; } if (value === null){ if (tokenStream.LA(3) == Tokens.EQUALS && this.options.ieFilters){ value = this._ie_function(); } else { value = this._function(); } } } else { value = token.value; if (unary === null){ line = token.startLine; col = token.startCol; } } } return value !== null ? new PropertyValuePart(unary !== null ? 
// NOTE(review): collapsed vendored CSS parser code — value-function productions. Finishes _term's return
// (PropertyValuePart, with the unary sign prepended when present), then _function (FUNCTION name + expr text,
// with a special tail loop for IE filter 'name=value' pairs when options.ieFilters and an EQUALS is ahead)
// and _ie_function (the same 'name=value' pair loop for progid-style IE_FUNCTION/FUNCTION filters).
unary + value : value, line, col) : null; }, _function: function(){ var tokenStream = this._tokenStream, functionText = null, expr = null, lt; if (tokenStream.match(Tokens.FUNCTION)){ functionText = tokenStream.token().value; this._readWhitespace(); expr = this._expr(true); functionText += expr; if (this.options.ieFilters && tokenStream.peek() == Tokens.EQUALS){ do { if (this._readWhitespace()){ functionText += tokenStream.token().value; } if (tokenStream.LA(0) == Tokens.COMMA){ functionText += tokenStream.token().value; } tokenStream.match(Tokens.IDENT); functionText += tokenStream.token().value; tokenStream.match(Tokens.EQUALS); functionText += tokenStream.token().value; lt = tokenStream.peek(); while(lt != Tokens.COMMA && lt != Tokens.S && lt != Tokens.RPAREN){ tokenStream.get(); functionText += tokenStream.token().value; lt = tokenStream.peek(); } } while(tokenStream.match([Tokens.COMMA, Tokens.S])); } tokenStream.match(Tokens.RPAREN); functionText += ")"; this._readWhitespace(); } return functionText; }, _ie_function: function(){ var tokenStream = this._tokenStream, functionText = null, expr = null, lt; if (tokenStream.match([Tokens.IE_FUNCTION, Tokens.FUNCTION])){ functionText = tokenStream.token().value; do { if (this._readWhitespace()){ functionText += tokenStream.token().value; } if (tokenStream.LA(0) == Tokens.COMMA){ functionText += tokenStream.token().value; } tokenStream.match(Tokens.IDENT); functionText += tokenStream.token().value; tokenStream.match(Tokens.EQUALS); functionText += tokenStream.token().value; lt = tokenStream.peek(); while(lt != Tokens.COMMA && lt != Tokens.S && lt != Tokens.RPAREN){ tokenStream.get(); functionText += tokenStream.token().value; lt = tokenStream.peek(); } } while(tokenStream.match([Tokens.COMMA, Tokens.S])); tokenStream.match(Tokens.RPAREN); functionText += ")"; this._readWhitespace(); } return functionText; }, _hexcolor: function(){ var tokenStream = this._tokenStream, token = null, color; 
// _hexcolor (continued): a matched HASH must look like 3-6 hex digits or a SyntaxError is thrown; returns the
// token (not a wrapped unit) or null. _keyframes: '@keyframes' (vendor prefix captured from the at-token via
// RegExp.$1), name, '{', then keyframe rules while the next token is IDENT or PERCENTAGE, firing
// startkeyframes/endkeyframes. _keyframe_name (IDENT|STRING), _keyframe_rule (key list + declarations with
// startkeyframerule/endkeyframerule events), _key_list (comma-separated keys), and the start of _key.
if(tokenStream.match(Tokens.HASH)){ token = tokenStream.token(); color = token.value; if (!/#[a-f0-9]{3,6}/i.test(color)){ throw new SyntaxError("Expected a hex color but found '" + color + "' at line " + token.startLine + ", col " + token.startCol + ".", token.startLine, token.startCol); } this._readWhitespace(); } return token; }, _keyframes: function(){ var tokenStream = this._tokenStream, token, tt, name, prefix = ""; tokenStream.mustMatch(Tokens.KEYFRAMES_SYM); token = tokenStream.token(); if (/^@\-([^\-]+)\-/.test(token.value)) { prefix = RegExp.$1; } this._readWhitespace(); name = this._keyframe_name(); this._readWhitespace(); tokenStream.mustMatch(Tokens.LBRACE); this.fire({ type: "startkeyframes", name: name, prefix: prefix, line: token.startLine, col: token.startCol }); this._readWhitespace(); tt = tokenStream.peek(); while(tt == Tokens.IDENT || tt == Tokens.PERCENTAGE) { this._keyframe_rule(); this._readWhitespace(); tt = tokenStream.peek(); } this.fire({ type: "endkeyframes", name: name, prefix: prefix, line: token.startLine, col: token.startCol }); this._readWhitespace(); tokenStream.mustMatch(Tokens.RBRACE); }, _keyframe_name: function(){ var tokenStream = this._tokenStream, token; tokenStream.mustMatch([Tokens.IDENT, Tokens.STRING]); return SyntaxUnit.fromToken(tokenStream.token()); }, _keyframe_rule: function(){ var tokenStream = this._tokenStream, token, keyList = this._key_list(); this.fire({ type: "startkeyframerule", keys: keyList, line: keyList[0].line, col: keyList[0].col }); this._readDeclarations(true); this.fire({ type: "endkeyframerule", keys: keyList, line: keyList[0].line, col: keyList[0].col }); }, _key_list: function(){ var tokenStream = this._tokenStream, token, key, keyList = []; keyList.push(this._key()); this._readWhitespace(); while(tokenStream.match(Tokens.COMMA)){ this._readWhitespace(); keyList.push(this._key()); this._readWhitespace(); } return keyList; }, _key: function(){ var tokenStream = this._tokenStream, token; if 
// _key (continued): PERCENTAGE or a from/to ident (anything else is ungotten and reported as unexpected).
// Then _skipCruft (discards S/CDO/CDC), _readDeclarations (the shared declaration-block loop: optional '{',
// declarations separated by ';', optional margin boxes when readMargins; on SyntaxError in non-strict mode it
// fires an "error" event, advances to ';' or '}', and resumes recursively after a ';'), _readWhitespace
// (concatenates S tokens), _unexpectedToken (throws SyntaxError), _verifyEnd (expects EOF), _validateProperty
// (delegates to Validation.validate), and the parse / parseStyleSheet entry points.
(tokenStream.match(Tokens.PERCENTAGE)){ return SyntaxUnit.fromToken(tokenStream.token()); } else if (tokenStream.match(Tokens.IDENT)){ token = tokenStream.token(); if (/from|to/i.test(token.value)){ return SyntaxUnit.fromToken(token); } tokenStream.unget(); } this._unexpectedToken(tokenStream.LT(1)); }, _skipCruft: function(){ while(this._tokenStream.match([Tokens.S, Tokens.CDO, Tokens.CDC])){ } }, _readDeclarations: function(checkStart, readMargins){ var tokenStream = this._tokenStream, tt; this._readWhitespace(); if (checkStart){ tokenStream.mustMatch(Tokens.LBRACE); } this._readWhitespace(); try { while(true){ if (tokenStream.match(Tokens.SEMICOLON) || (readMargins && this._margin())){ } else if (this._declaration()){ if (!tokenStream.match(Tokens.SEMICOLON)){ break; } } else { break; } this._readWhitespace(); } tokenStream.mustMatch(Tokens.RBRACE); this._readWhitespace(); } catch (ex) { if (ex instanceof SyntaxError && !this.options.strict){ this.fire({ type: "error", error: ex, message: ex.message, line: ex.line, col: ex.col }); tt = tokenStream.advance([Tokens.SEMICOLON, Tokens.RBRACE]); if (tt == Tokens.SEMICOLON){ this._readDeclarations(false, readMargins); } else if (tt != Tokens.RBRACE){ throw ex; } } else { throw ex; } } }, _readWhitespace: function(){ var tokenStream = this._tokenStream, ws = ""; while(tokenStream.match(Tokens.S)){ ws += tokenStream.token().value; } return ws; }, _unexpectedToken: function(token){ throw new SyntaxError("Unexpected token '" + token.value + "' at line " + token.startLine + ", col " + token.startCol + ".", token.startLine, token.startCol); }, _verifyEnd: function(){ if (this._tokenStream.LA(1) != Tokens.EOF){ this._unexpectedToken(this._tokenStream.LT(1)); } }, _validateProperty: function(property, value){ Validation.validate(property, value); }, parse: function(input){ this._tokenStream = new TokenStream(input, Tokens); this._stylesheet(); }, parseStyleSheet: function(input){ return this.parse(input); }, parseMediaQuery: 
// Remaining public entry points: parseMediaQuery, parsePropertyValue, parseRule, parseSelector, and
// parseStyleAttribute (which appends '}' to the input for _readDeclarations error recovery); each builds a
// fresh TokenStream and (except parseStyleAttribute) verifies EOF. The tail then mixes `additions` into the
// prototype, closes the enclosing IIFE, and opens the Properties validation table (values are syntax strings,
// 1 for "accept anything", {multi, comma, max} option objects, or validator functions).
function(input){ this._tokenStream = new TokenStream(input, Tokens); var result = this._media_query(); this._verifyEnd(); return result; }, parsePropertyValue: function(input){ this._tokenStream = new TokenStream(input, Tokens); this._readWhitespace(); var result = this._expr(); this._readWhitespace(); this._verifyEnd(); return result; }, parseRule: function(input){ this._tokenStream = new TokenStream(input, Tokens); this._readWhitespace(); var result = this._ruleset(); this._readWhitespace(); this._verifyEnd(); return result; }, parseSelector: function(input){ this._tokenStream = new TokenStream(input, Tokens); this._readWhitespace(); var result = this._selector(); this._readWhitespace(); this._verifyEnd(); return result; }, parseStyleAttribute: function(input){ input += "}"; // for error recovery in _readDeclarations() this._tokenStream = new TokenStream(input, Tokens); this._readDeclarations(); } }; for (prop in additions){ if (additions.hasOwnProperty(prop)){ proto[prop] = additions[prop]; } } return proto; }(); var Properties = { "align-items" : "flex-start | flex-end | center | baseline | stretch", "align-content" : "flex-start | flex-end | center | space-between | space-around | stretch", "align-self" : "auto | flex-start | flex-end | center | baseline | stretch", "-webkit-align-items" : "flex-start | flex-end | center | baseline | stretch", "-webkit-align-content" : "flex-start | flex-end | center | space-between | space-around | stretch", "-webkit-align-self" : "auto | flex-start | flex-end | center | baseline | stretch", "alignment-adjust" : "auto | baseline | before-edge | text-before-edge | middle | central | after-edge | text-after-edge | ideographic | alphabetic | hanging | mathematical | <percentage> | <length>", "alignment-baseline" : "baseline | use-script | before-edge | text-before-edge | after-edge | text-after-edge | central | middle | ideographic | alphabetic | hanging | mathematical", "animation" : 1, "animation-delay" : { multi: "<time>", 
// NOTE(review): interior of the Properties validation table (the object literal opens before and closes after
// this chunk). Unprefixed and -moz-/-ms-/-webkit-/-o- animation-* entries share the {multi, comma: true}
// pattern; entries mapped to 1 accept any value without validation.
comma: true }, "animation-direction" : { multi: "normal | alternate", comma: true }, "animation-duration" : { multi: "<time>", comma: true }, "animation-fill-mode" : { multi: "none | forwards | backwards | both", comma: true }, "animation-iteration-count" : { multi: "<number> | infinite", comma: true }, "animation-name" : { multi: "none | <ident>", comma: true }, "animation-play-state" : { multi: "running | paused", comma: true }, "animation-timing-function" : 1, "-moz-animation-delay" : { multi: "<time>", comma: true }, "-moz-animation-direction" : { multi: "normal | alternate", comma: true }, "-moz-animation-duration" : { multi: "<time>", comma: true }, "-moz-animation-iteration-count" : { multi: "<number> | infinite", comma: true }, "-moz-animation-name" : { multi: "none | <ident>", comma: true }, "-moz-animation-play-state" : { multi: "running | paused", comma: true }, "-ms-animation-delay" : { multi: "<time>", comma: true }, "-ms-animation-direction" : { multi: "normal | alternate", comma: true }, "-ms-animation-duration" : { multi: "<time>", comma: true }, "-ms-animation-iteration-count" : { multi: "<number> | infinite", comma: true }, "-ms-animation-name" : { multi: "none | <ident>", comma: true }, "-ms-animation-play-state" : { multi: "running | paused", comma: true }, "-webkit-animation-delay" : { multi: "<time>", comma: true }, "-webkit-animation-direction" : { multi: "normal | alternate", comma: true }, "-webkit-animation-duration" : { multi: "<time>", comma: true }, "-webkit-animation-fill-mode" : { multi: "none | forwards | backwards | both", comma: true }, "-webkit-animation-iteration-count" : { multi: "<number> | infinite", comma: true }, "-webkit-animation-name" : { multi: "none | <ident>", comma: true }, "-webkit-animation-play-state" : { multi: "running | paused", comma: true }, "-o-animation-delay" : { multi: "<time>", comma: true }, "-o-animation-direction" : { multi: "normal | alternate", comma: true }, "-o-animation-duration" : { multi: 
// -o- animation entries, 'appearance', the 'azimuth' validator function (simple keyword/angle, or optional
// 'behind' combined with a direction keyword; trailing parts raise ValidationError), and the background-*
// entries.
"<time>", comma: true }, "-o-animation-iteration-count" : { multi: "<number> | infinite", comma: true }, "-o-animation-name" : { multi: "none | <ident>", comma: true }, "-o-animation-play-state" : { multi: "running | paused", comma: true }, "appearance" : "icon | window | desktop | workspace | document | tooltip | dialog | button | push-button | hyperlink | radio-button | checkbox | menu-item | tab | menu | menubar | pull-down-menu | pop-up-menu | list-menu | radio-group | checkbox-group | outline-tree | range | field | combo-box | signature | password | normal | none | inherit", "azimuth" : function (expression) { var simple = "<angle> | leftwards | rightwards | inherit", direction = "left-side | far-left | left | center-left | center | center-right | right | far-right | right-side", behind = false, valid = false, part; if (!ValidationTypes.isAny(expression, simple)) { if (ValidationTypes.isAny(expression, "behind")) { behind = true; valid = true; } if (ValidationTypes.isAny(expression, direction)) { valid = true; if (!behind) { ValidationTypes.isAny(expression, "behind"); } } } if (expression.hasNext()) { part = expression.next(); if (valid) { throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected (<'azimuth'>) but found '" + part + "'.", part.line, part.col); } } }, "backface-visibility" : "visible | hidden", "background" : 1, "background-attachment" : { multi: "<attachment>", comma: true }, "background-clip" : { multi: "<box>", comma: true }, "background-color" : "<color> | inherit", "background-image" : { multi: "<bg-image>", comma: true }, "background-origin" : { multi: "<box>", comma: true }, "background-position" : { multi: "<bg-position>", comma: true }, "background-repeat" : { multi: "<repeat-style>" }, "background-size" : { multi: "<bg-size>", comma: true }, "baseline-shift" : "baseline | sub | super | <percentage> | <length>", "behavior" : 1, "binding" : 1, "bleed" 
// bookmark-*, border shorthand/side/radius entries, and the 'border-image-slice' validator (optional 'fill'
// keyword plus up to four number/percentage parts). NOTE(review): several of the following source-chunk wraps
// fall inside string literals, so no further comments are inserted until a string-safe boundary.
: "<length>", "bookmark-label" : "<content> | <attr> | <string>", "bookmark-level" : "none | <integer>", "bookmark-state" : "open | closed", "bookmark-target" : "none | <uri> | <attr>", "border" : "<border-width> || <border-style> || <color>", "border-bottom" : "<border-width> || <border-style> || <color>", "border-bottom-color" : "<color> | inherit", "border-bottom-left-radius" : "<x-one-radius>", "border-bottom-right-radius" : "<x-one-radius>", "border-bottom-style" : "<border-style>", "border-bottom-width" : "<border-width>", "border-collapse" : "collapse | separate | inherit", "border-color" : { multi: "<color> | inherit", max: 4 }, "border-image" : 1, "border-image-outset" : { multi: "<length> | <number>", max: 4 }, "border-image-repeat" : { multi: "stretch | repeat | round", max: 2 }, "border-image-slice" : function(expression) { var valid = false, numeric = "<number> | <percentage>", fill = false, count = 0, max = 4, part; if (ValidationTypes.isAny(expression, "fill")) { fill = true; valid = true; } while (expression.hasNext() && count < max) { valid = ValidationTypes.isAny(expression, numeric); if (!valid) { break; } count++; } if (!fill) { ValidationTypes.isAny(expression, "fill"); } else { valid = true; } if (expression.hasNext()) { part = expression.next(); if (valid) { throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected ([<number> | <percentage>]{1,4} && fill?) 
but found '" + part + "'.", part.line, part.col); } } }, "border-image-source" : "<image> | none", "border-image-width" : { multi: "<length> | <percentage> | <number> | auto", max: 4 }, "border-left" : "<border-width> || <border-style> || <color>", "border-left-color" : "<color> | inherit", "border-left-style" : "<border-style>", "border-left-width" : "<border-width>", "border-radius" : function(expression) { var valid = false, simple = "<length> | <percentage> | inherit", slash = false, fill = false, count = 0, max = 8, part; while (expression.hasNext() && count < max) { valid = ValidationTypes.isAny(expression, simple); if (!valid) { if (expression.peek() == "/" && count > 0 && !slash) { slash = true; max = count + 5; expression.next(); } else { break; } } count++; } if (expression.hasNext()) { part = expression.next(); if (valid) { throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected (<'border-radius'>) but found '" + part + "'.", part.line, part.col); } } }, "border-right" : "<border-width> || <border-style> || <color>", "border-right-color" : "<color> | inherit", "border-right-style" : "<border-style>", "border-right-width" : "<border-width>", "border-spacing" : { multi: "<length> | inherit", max: 2 }, "border-style" : { multi: "<border-style>", max: 4 }, "border-top" : "<border-width> || <border-style> || <color>", "border-top-color" : "<color> | inherit", "border-top-left-radius" : "<x-one-radius>", "border-top-right-radius" : "<x-one-radius>", "border-top-style" : "<border-style>", "border-top-width" : "<border-width>", "border-width" : { multi: "<border-width>", max: 4 }, "bottom" : "<margin-width> | inherit", "-moz-box-align" : "start | end | center | baseline | stretch", "-moz-box-decoration-break" : "slice |clone", "-moz-box-direction" : "normal | reverse | inherit", "-moz-box-flex" : "<number>", "-moz-box-flex-group" : "<integer>", "-moz-box-lines" : "single | 
multiple", "-moz-box-ordinal-group" : "<integer>", "-moz-box-orient" : "horizontal | vertical | inline-axis | block-axis | inherit", "-moz-box-pack" : "start | end | center | justify", "-webkit-box-align" : "start | end | center | baseline | stretch", "-webkit-box-decoration-break" : "slice |clone", "-webkit-box-direction" : "normal | reverse | inherit", "-webkit-box-flex" : "<number>", "-webkit-box-flex-group" : "<integer>", "-webkit-box-lines" : "single | multiple", "-webkit-box-ordinal-group" : "<integer>", "-webkit-box-orient" : "horizontal | vertical | inline-axis | block-axis | inherit", "-webkit-box-pack" : "start | end | center | justify", "box-shadow" : function (expression) { var result = false, part; if (!ValidationTypes.isAny(expression, "none")) { Validation.multiProperty("<shadow>", expression, true, Infinity); } else { if (expression.hasNext()) { part = expression.next(); throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } } }, "box-sizing" : "content-box | border-box | inherit", "break-after" : "auto | always | avoid | left | right | page | column | avoid-page | avoid-column", "break-before" : "auto | always | avoid | left | right | page | column | avoid-page | avoid-column", "break-inside" : "auto | avoid | avoid-page | avoid-column", "caption-side" : "top | bottom | inherit", "clear" : "none | right | left | both | inherit", "clip" : 1, "color" : "<color> | inherit", "color-profile" : 1, "column-count" : "<integer> | auto", //http://www.w3.org/TR/css3-multicol/ "column-fill" : "auto | balance", "column-gap" : "<length> | normal", "column-rule" : "<border-width> || <border-style> || <color>", "column-rule-color" : "<color>", "column-rule-style" : "<border-style>", "column-rule-width" : "<border-width>", "column-span" : "none | all", "column-width" : "<length> | auto", "columns" : 1, "content" : 1, "counter-increment" : 1, "counter-reset" : 1, "crop" : "<shape> | auto", "cue" : "cue-after | cue-before 
| inherit", "cue-after" : 1, "cue-before" : 1, "cursor" : 1, "direction" : "ltr | rtl | inherit", "display" : "inline | block | list-item | inline-block | table | inline-table | table-row-group | table-header-group | table-footer-group | table-row | table-column-group | table-column | table-cell | table-caption | grid | inline-grid | none | inherit | -moz-box | -moz-inline-block | -moz-inline-box | -moz-inline-grid | -moz-inline-stack | -moz-inline-table | -moz-grid | -moz-grid-group | -moz-grid-line | -moz-groupbox | -moz-deck | -moz-popup | -moz-stack | -moz-marker | -webkit-box | -webkit-inline-box | -ms-flexbox | -ms-inline-flexbox | flex | -webkit-flex | inline-flex | -webkit-inline-flex", "dominant-baseline" : 1, "drop-initial-after-adjust" : "central | middle | after-edge | text-after-edge | ideographic | alphabetic | mathematical | <percentage> | <length>", "drop-initial-after-align" : "baseline | use-script | before-edge | text-before-edge | after-edge | text-after-edge | central | middle | ideographic | alphabetic | hanging | mathematical", "drop-initial-before-adjust" : "before-edge | text-before-edge | central | middle | hanging | mathematical | <percentage> | <length>", "drop-initial-before-align" : "caps-height | baseline | use-script | before-edge | text-before-edge | after-edge | text-after-edge | central | middle | ideographic | alphabetic | hanging | mathematical", "drop-initial-size" : "auto | line | <length> | <percentage>", "drop-initial-value" : "initial | <integer>", "elevation" : "<angle> | below | level | above | higher | lower | inherit", "empty-cells" : "show | hide | inherit", "filter" : 1, "fit" : "fill | hidden | meet | slice", "fit-position" : 1, "flex" : "<flex>", "flex-basis" : "<width>", "flex-direction" : "row | row-reverse | column | column-reverse", "flex-flow" : "<flex-direction> || <flex-wrap>", "flex-grow" : "<number>", "flex-shrink" : "<number>", "flex-wrap" : "nowrap | wrap | wrap-reverse", "-webkit-flex" : "<flex>", 
// -webkit-/-ms- flexbox entries, float, font-*, grid-*, hanging-punctuation, height, hyphenate-*, hyphens,
// icon, and image-orientation.
"-webkit-flex-basis" : "<width>", "-webkit-flex-direction" : "row | row-reverse | column | column-reverse", "-webkit-flex-flow" : "<flex-direction> || <flex-wrap>", "-webkit-flex-grow" : "<number>", "-webkit-flex-shrink" : "<number>", "-webkit-flex-wrap" : "nowrap | wrap | wrap-reverse", "-ms-flex" : "<flex>", "-ms-flex-align" : "start | end | center | stretch | baseline", "-ms-flex-direction" : "row | row-reverse | column | column-reverse | inherit", "-ms-flex-order" : "<number>", "-ms-flex-pack" : "start | end | center | justify", "-ms-flex-wrap" : "nowrap | wrap | wrap-reverse", "float" : "left | right | none | inherit", "float-offset" : 1, "font" : 1, "font-family" : 1, "font-size" : "<absolute-size> | <relative-size> | <length> | <percentage> | inherit", "font-size-adjust" : "<number> | none | inherit", "font-stretch" : "normal | ultra-condensed | extra-condensed | condensed | semi-condensed | semi-expanded | expanded | extra-expanded | ultra-expanded | inherit", "font-style" : "normal | italic | oblique | inherit", "font-variant" : "normal | small-caps | inherit", "font-weight" : "normal | bold | bolder | lighter | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900 | inherit", "grid-cell-stacking" : "columns | rows | layer", "grid-column" : 1, "grid-columns" : 1, "grid-column-align" : "start | end | center | stretch", "grid-column-sizing" : 1, "grid-column-span" : "<integer>", "grid-flow" : "none | rows | columns", "grid-layer" : "<integer>", "grid-row" : 1, "grid-rows" : 1, "grid-row-align" : "start | end | center | stretch", "grid-row-span" : "<integer>", "grid-row-sizing" : 1, "hanging-punctuation" : 1, "height" : "<margin-width> | <content-sizing> | inherit", "hyphenate-after" : "<integer> | auto", "hyphenate-before" : "<integer> | auto", "hyphenate-character" : "<string> | auto", "hyphenate-lines" : "no-limit | <integer>", "hyphenate-resource" : 1, "hyphens" : "none | manual | auto", "icon" : 1, "image-orientation" : "angle | auto", "image-rendering" : 
// image-*, justify-content, positioning/spacing entries, line-*, list-style-*, margin*, mark*, marquee-*,
// min/max sizing, move-to, nav-*, opacity, and order (the table continues past this chunk).
1, "image-resolution" : 1, "inline-box-align" : "initial | last | <integer>", "justify-content" : "flex-start | flex-end | center | space-between | space-around", "-webkit-justify-content" : "flex-start | flex-end | center | space-between | space-around", "left" : "<margin-width> | inherit", "letter-spacing" : "<length> | normal | inherit", "line-height" : "<number> | <length> | <percentage> | normal | inherit", "line-break" : "auto | loose | normal | strict", "line-stacking" : 1, "line-stacking-ruby" : "exclude-ruby | include-ruby", "line-stacking-shift" : "consider-shifts | disregard-shifts", "line-stacking-strategy" : "inline-line-height | block-line-height | max-height | grid-height", "list-style" : 1, "list-style-image" : "<uri> | none | inherit", "list-style-position" : "inside | outside | inherit", "list-style-type" : "disc | circle | square | decimal | decimal-leading-zero | lower-roman | upper-roman | lower-greek | lower-latin | upper-latin | armenian | georgian | lower-alpha | upper-alpha | none | inherit", "margin" : { multi: "<margin-width> | inherit", max: 4 }, "margin-bottom" : "<margin-width> | inherit", "margin-left" : "<margin-width> | inherit", "margin-right" : "<margin-width> | inherit", "margin-top" : "<margin-width> | inherit", "mark" : 1, "mark-after" : 1, "mark-before" : 1, "marks" : 1, "marquee-direction" : 1, "marquee-play-count" : 1, "marquee-speed" : 1, "marquee-style" : 1, "max-height" : "<length> | <percentage> | <content-sizing> | none | inherit", "max-width" : "<length> | <percentage> | <content-sizing> | none | inherit", "min-height" : "<length> | <percentage> | <content-sizing> | contain-floats | -moz-contain-floats | -webkit-contain-floats | inherit", "min-width" : "<length> | <percentage> | <content-sizing> | contain-floats | -moz-contain-floats | -webkit-contain-floats | inherit", "move-to" : 1, "nav-down" : 1, "nav-index" : 1, "nav-left" : 1, "nav-right" : 1, "nav-up" : 1, "opacity" : "<number> | inherit", "order" : "<integer>", 
"-webkit-order" : "<integer>", "orphans" : "<integer> | inherit", "outline" : 1, "outline-color" : "<color> | invert | inherit", "outline-offset" : 1, "outline-style" : "<border-style> | inherit", "outline-width" : "<border-width> | inherit", "overflow" : "visible | hidden | scroll | auto | inherit", "overflow-style" : 1, "overflow-wrap" : "normal | break-word", "overflow-x" : 1, "overflow-y" : 1, "padding" : { multi: "<padding-width> | inherit", max: 4 }, "padding-bottom" : "<padding-width> | inherit", "padding-left" : "<padding-width> | inherit", "padding-right" : "<padding-width> | inherit", "padding-top" : "<padding-width> | inherit", "page" : 1, "page-break-after" : "auto | always | avoid | left | right | inherit", "page-break-before" : "auto | always | avoid | left | right | inherit", "page-break-inside" : "auto | avoid | inherit", "page-policy" : 1, "pause" : 1, "pause-after" : 1, "pause-before" : 1, "perspective" : 1, "perspective-origin" : 1, "phonemes" : 1, "pitch" : 1, "pitch-range" : 1, "play-during" : 1, "pointer-events" : "auto | none | visiblePainted | visibleFill | visibleStroke | visible | painted | fill | stroke | all | inherit", "position" : "static | relative | absolute | fixed | inherit", "presentation-level" : 1, "punctuation-trim" : 1, "quotes" : 1, "rendering-intent" : 1, "resize" : 1, "rest" : 1, "rest-after" : 1, "rest-before" : 1, "richness" : 1, "right" : "<margin-width> | inherit", "rotation" : 1, "rotation-point" : 1, "ruby-align" : 1, "ruby-overhang" : 1, "ruby-position" : 1, "ruby-span" : 1, "size" : 1, "speak" : "normal | none | spell-out | inherit", "speak-header" : "once | always | inherit", "speak-numeral" : "digits | continuous | inherit", "speak-punctuation" : "code | none | inherit", "speech-rate" : 1, "src" : 1, "stress" : 1, "string-set" : 1, "table-layout" : "auto | fixed | inherit", "tab-size" : "<integer> | <length>", "target" : 1, "target-name" : 1, "target-new" : 1, "target-position" : 1, "text-align" : "left | right | 
center | justify | inherit" , "text-align-last" : 1, "text-decoration" : 1, "text-emphasis" : 1, "text-height" : 1, "text-indent" : "<length> | <percentage> | inherit", "text-justify" : "auto | none | inter-word | inter-ideograph | inter-cluster | distribute | kashida", "text-outline" : 1, "text-overflow" : 1, "text-rendering" : "auto | optimizeSpeed | optimizeLegibility | geometricPrecision | inherit", "text-shadow" : 1, "text-transform" : "capitalize | uppercase | lowercase | none | inherit", "text-wrap" : "normal | none | avoid", "top" : "<margin-width> | inherit", "-ms-touch-action" : "auto | none | pan-x | pan-y", "touch-action" : "auto | none | pan-x | pan-y", "transform" : 1, "transform-origin" : 1, "transform-style" : 1, "transition" : 1, "transition-delay" : 1, "transition-duration" : 1, "transition-property" : 1, "transition-timing-function" : 1, "unicode-bidi" : "normal | embed | isolate | bidi-override | isolate-override | plaintext | inherit", "user-modify" : "read-only | read-write | write-only | inherit", "user-select" : "none | text | toggle | element | elements | all | inherit", "vertical-align" : "auto | use-script | baseline | sub | super | top | text-top | central | middle | bottom | text-bottom | <percentage> | <length>", "visibility" : "visible | hidden | collapse | inherit", "voice-balance" : 1, "voice-duration" : 1, "voice-family" : 1, "voice-pitch" : 1, "voice-pitch-range" : 1, "voice-rate" : 1, "voice-stress" : 1, "voice-volume" : 1, "volume" : 1, "white-space" : "normal | pre | nowrap | pre-wrap | pre-line | inherit | -pre-wrap | -o-pre-wrap | -moz-pre-wrap | -hp-pre-wrap", //http://perishablepress.com/wrapping-content/ "white-space-collapse" : 1, "widows" : "<integer> | inherit", "width" : "<length> | <percentage> | <content-sizing> | auto | inherit", "word-break" : "normal | keep-all | break-all", "word-spacing" : "<length> | normal | inherit", "word-wrap" : "normal | break-word", "writing-mode" : "horizontal-tb | vertical-rl | 
vertical-lr | lr-tb | rl-tb | tb-rl | bt-rl | tb-lr | bt-lr | lr-bt | rl-bt | lr | rl | tb | inherit", "z-index" : "<integer> | auto | inherit", "zoom" : "<number> | <percentage> | normal" }; function PropertyName(text, hack, line, col){ SyntaxUnit.call(this, text, line, col, Parser.PROPERTY_NAME_TYPE); this.hack = hack; } PropertyName.prototype = new SyntaxUnit(); PropertyName.prototype.constructor = PropertyName; PropertyName.prototype.toString = function(){ return (this.hack ? this.hack : "") + this.text; }; function PropertyValue(parts, line, col){ SyntaxUnit.call(this, parts.join(" "), line, col, Parser.PROPERTY_VALUE_TYPE); this.parts = parts; } PropertyValue.prototype = new SyntaxUnit(); PropertyValue.prototype.constructor = PropertyValue; function PropertyValueIterator(value){ this._i = 0; this._parts = value.parts; this._marks = []; this.value = value; } PropertyValueIterator.prototype.count = function(){ return this._parts.length; }; PropertyValueIterator.prototype.isFirst = function(){ return this._i === 0; }; PropertyValueIterator.prototype.hasNext = function(){ return (this._i < this._parts.length); }; PropertyValueIterator.prototype.mark = function(){ this._marks.push(this._i); }; PropertyValueIterator.prototype.peek = function(count){ return this.hasNext() ? this._parts[this._i + (count || 0)] : null; }; PropertyValueIterator.prototype.next = function(){ return this.hasNext() ? this._parts[this._i++] : null; }; PropertyValueIterator.prototype.previous = function(){ return this._i > 0 ? 
this._parts[--this._i] : null; }; PropertyValueIterator.prototype.restore = function(){ if (this._marks.length){ this._i = this._marks.pop(); } }; function PropertyValuePart(text, line, col){ SyntaxUnit.call(this, text, line, col, Parser.PROPERTY_VALUE_PART_TYPE); this.type = "unknown"; var temp; if (/^([+\-]?[\d\.]+)([a-z]+)$/i.test(text)){ //dimension this.type = "dimension"; this.value = +RegExp.$1; this.units = RegExp.$2; switch(this.units.toLowerCase()){ case "em": case "rem": case "ex": case "px": case "cm": case "mm": case "in": case "pt": case "pc": case "ch": case "vh": case "vw": case "vmax": case "vmin": this.type = "length"; break; case "deg": case "rad": case "grad": this.type = "angle"; break; case "ms": case "s": this.type = "time"; break; case "hz": case "khz": this.type = "frequency"; break; case "dpi": case "dpcm": this.type = "resolution"; break; } } else if (/^([+\-]?[\d\.]+)%$/i.test(text)){ //percentage this.type = "percentage"; this.value = +RegExp.$1; } else if (/^([+\-]?\d+)$/i.test(text)){ //integer this.type = "integer"; this.value = +RegExp.$1; } else if (/^([+\-]?[\d\.]+)$/i.test(text)){ //number this.type = "number"; this.value = +RegExp.$1; } else if (/^#([a-f0-9]{3,6})/i.test(text)){ //hexcolor this.type = "color"; temp = RegExp.$1; if (temp.length == 3){ this.red = parseInt(temp.charAt(0)+temp.charAt(0),16); this.green = parseInt(temp.charAt(1)+temp.charAt(1),16); this.blue = parseInt(temp.charAt(2)+temp.charAt(2),16); } else { this.red = parseInt(temp.substring(0,2),16); this.green = parseInt(temp.substring(2,4),16); this.blue = parseInt(temp.substring(4,6),16); } } else if (/^rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)/i.test(text)){ //rgb() color with absolute numbers this.type = "color"; this.red = +RegExp.$1; this.green = +RegExp.$2; this.blue = +RegExp.$3; } else if (/^rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)/i.test(text)){ //rgb() color with percentages this.type = "color"; this.red = +RegExp.$1 * 255 / 100; this.green = 
+RegExp.$2 * 255 / 100; this.blue = +RegExp.$3 * 255 / 100; } else if (/^rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*([\d\.]+)\s*\)/i.test(text)){ //rgba() color with absolute numbers this.type = "color"; this.red = +RegExp.$1; this.green = +RegExp.$2; this.blue = +RegExp.$3; this.alpha = +RegExp.$4; } else if (/^rgba\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*,\s*([\d\.]+)\s*\)/i.test(text)){ //rgba() color with percentages this.type = "color"; this.red = +RegExp.$1 * 255 / 100; this.green = +RegExp.$2 * 255 / 100; this.blue = +RegExp.$3 * 255 / 100; this.alpha = +RegExp.$4; } else if (/^hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)/i.test(text)){ //hsl() this.type = "color"; this.hue = +RegExp.$1; this.saturation = +RegExp.$2 / 100; this.lightness = +RegExp.$3 / 100; } else if (/^hsla\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*,\s*([\d\.]+)\s*\)/i.test(text)){ //hsla() color with percentages this.type = "color"; this.hue = +RegExp.$1; this.saturation = +RegExp.$2 / 100; this.lightness = +RegExp.$3 / 100; this.alpha = +RegExp.$4; } else if (/^url\(["']?([^\)"']+)["']?\)/i.test(text)){ //URI this.type = "uri"; this.uri = RegExp.$1; } else if (/^([^\(]+)\(/i.test(text)){ this.type = "function"; this.name = RegExp.$1; this.value = text; } else if (/^["'][^"']*["']/.test(text)){ //string this.type = "string"; this.value = eval(text); } else if (Colors[text.toLowerCase()]){ //named color this.type = "color"; temp = Colors[text.toLowerCase()].substring(1); this.red = parseInt(temp.substring(0,2),16); this.green = parseInt(temp.substring(2,4),16); this.blue = parseInt(temp.substring(4,6),16); } else if (/^[\,\/]$/.test(text)){ this.type = "operator"; this.value = text; } else if (/^[a-z\-_\u0080-\uFFFF][a-z0-9\-_\u0080-\uFFFF]*$/i.test(text)){ this.type = "identifier"; this.value = text; } } PropertyValuePart.prototype = new SyntaxUnit(); PropertyValuePart.prototype.constructor = PropertyValuePart; PropertyValuePart.fromToken = function(token){ return new 
PropertyValuePart(token.value, token.startLine, token.startCol); }; var Pseudos = { ":first-letter": 1, ":first-line": 1, ":before": 1, ":after": 1 }; Pseudos.ELEMENT = 1; Pseudos.CLASS = 2; Pseudos.isElement = function(pseudo){ return pseudo.indexOf("::") === 0 || Pseudos[pseudo.toLowerCase()] == Pseudos.ELEMENT; }; function Selector(parts, line, col){ SyntaxUnit.call(this, parts.join(" "), line, col, Parser.SELECTOR_TYPE); this.parts = parts; this.specificity = Specificity.calculate(this); } Selector.prototype = new SyntaxUnit(); Selector.prototype.constructor = Selector; function SelectorPart(elementName, modifiers, text, line, col){ SyntaxUnit.call(this, text, line, col, Parser.SELECTOR_PART_TYPE); this.elementName = elementName; this.modifiers = modifiers; } SelectorPart.prototype = new SyntaxUnit(); SelectorPart.prototype.constructor = SelectorPart; function SelectorSubPart(text, type, line, col){ SyntaxUnit.call(this, text, line, col, Parser.SELECTOR_SUB_PART_TYPE); this.type = type; this.args = []; } SelectorSubPart.prototype = new SyntaxUnit(); SelectorSubPart.prototype.constructor = SelectorSubPart; function Specificity(a, b, c, d){ this.a = a; this.b = b; this.c = c; this.d = d; } Specificity.prototype = { constructor: Specificity, compare: function(other){ var comps = ["a", "b", "c", "d"], i, len; for (i=0, len=comps.length; i < len; i++){ if (this[comps[i]] < other[comps[i]]){ return -1; } else if (this[comps[i]] > other[comps[i]]){ return 1; } } return 0; }, valueOf: function(){ return (this.a * 1000) + (this.b * 100) + (this.c * 10) + this.d; }, toString: function(){ return this.a + "," + this.b + "," + this.c + "," + this.d; } }; Specificity.calculate = function(selector){ var i, len, part, b=0, c=0, d=0; function updateValues(part){ var i, j, len, num, elementName = part.elementName ? 
part.elementName.text : "", modifier; if (elementName && elementName.charAt(elementName.length-1) != "*") { d++; } for (i=0, len=part.modifiers.length; i < len; i++){ modifier = part.modifiers[i]; switch(modifier.type){ case "class": case "attribute": c++; break; case "id": b++; break; case "pseudo": if (Pseudos.isElement(modifier.text)){ d++; } else { c++; } break; case "not": for (j=0, num=modifier.args.length; j < num; j++){ updateValues(modifier.args[j]); } } } } for (i=0, len=selector.parts.length; i < len; i++){ part = selector.parts[i]; if (part instanceof SelectorPart){ updateValues(part); } } return new Specificity(0, b, c, d); }; var h = /^[0-9a-fA-F]$/, nonascii = /^[\u0080-\uFFFF]$/, nl = /\n|\r\n|\r|\f/; function isHexDigit(c){ return c !== null && h.test(c); } function isDigit(c){ return c !== null && /\d/.test(c); } function isWhitespace(c){ return c !== null && /\s/.test(c); } function isNewLine(c){ return c !== null && nl.test(c); } function isNameStart(c){ return c !== null && (/[a-z_\u0080-\uFFFF\\]/i.test(c)); } function isNameChar(c){ return c !== null && (isNameStart(c) || /[0-9\-\\]/.test(c)); } function isIdentStart(c){ return c !== null && (isNameStart(c) || /\-\\/.test(c)); } function mix(receiver, supplier){ for (var prop in supplier){ if (supplier.hasOwnProperty(prop)){ receiver[prop] = supplier[prop]; } } return receiver; } function TokenStream(input){ TokenStreamBase.call(this, input, Tokens); } TokenStream.prototype = mix(new TokenStreamBase(), { _getToken: function(channel){ var c, reader = this._reader, token = null, startLine = reader.getLine(), startCol = reader.getCol(); c = reader.read(); while(c){ switch(c){ case "/": if(reader.peek() == "*"){ token = this.commentToken(c, startLine, startCol); } else { token = this.charToken(c, startLine, startCol); } break; case "|": case "~": case "^": case "$": case "*": if(reader.peek() == "="){ token = this.comparisonToken(c, startLine, startCol); } else { token = this.charToken(c, 
startLine, startCol); } break; case "\"": case "'": token = this.stringToken(c, startLine, startCol); break; case "#": if (isNameChar(reader.peek())){ token = this.hashToken(c, startLine, startCol); } else { token = this.charToken(c, startLine, startCol); } break; case ".": if (isDigit(reader.peek())){ token = this.numberToken(c, startLine, startCol); } else { token = this.charToken(c, startLine, startCol); } break; case "-": if (reader.peek() == "-"){ //could be closing HTML-style comment token = this.htmlCommentEndToken(c, startLine, startCol); } else if (isNameStart(reader.peek())){ token = this.identOrFunctionToken(c, startLine, startCol); } else { token = this.charToken(c, startLine, startCol); } break; case "!": token = this.importantToken(c, startLine, startCol); break; case "@": token = this.atRuleToken(c, startLine, startCol); break; case ":": token = this.notToken(c, startLine, startCol); break; case "<": token = this.htmlCommentStartToken(c, startLine, startCol); break; case "U": case "u": if (reader.peek() == "+"){ token = this.unicodeRangeToken(c, startLine, startCol); break; } default: if (isDigit(c)){ token = this.numberToken(c, startLine, startCol); } else if (isWhitespace(c)){ token = this.whitespaceToken(c, startLine, startCol); } else if (isIdentStart(c)){ token = this.identOrFunctionToken(c, startLine, startCol); } else { token = this.charToken(c, startLine, startCol); } } break; } if (!token && c === null){ token = this.createToken(Tokens.EOF,null,startLine,startCol); } return token; }, createToken: function(tt, value, startLine, startCol, options){ var reader = this._reader; options = options || {}; return { value: value, type: tt, channel: options.channel, endChar: options.endChar, hide: options.hide || false, startLine: startLine, startCol: startCol, endLine: reader.getLine(), endCol: reader.getCol() }; }, atRuleToken: function(first, startLine, startCol){ var rule = first, reader = this._reader, tt = Tokens.CHAR, valid = false, ident, c; 
reader.mark(); ident = this.readName(); rule = first + ident; tt = Tokens.type(rule.toLowerCase()); if (tt == Tokens.CHAR || tt == Tokens.UNKNOWN){ if (rule.length > 1){ tt = Tokens.UNKNOWN_SYM; } else { tt = Tokens.CHAR; rule = first; reader.reset(); } } return this.createToken(tt, rule, startLine, startCol); }, charToken: function(c, startLine, startCol){ var tt = Tokens.type(c); var opts = {}; if (tt == -1){ tt = Tokens.CHAR; } else { opts.endChar = Tokens[tt].endChar; } return this.createToken(tt, c, startLine, startCol, opts); }, commentToken: function(first, startLine, startCol){ var reader = this._reader, comment = this.readComment(first); return this.createToken(Tokens.COMMENT, comment, startLine, startCol); }, comparisonToken: function(c, startLine, startCol){ var reader = this._reader, comparison = c + reader.read(), tt = Tokens.type(comparison) || Tokens.CHAR; return this.createToken(tt, comparison, startLine, startCol); }, hashToken: function(first, startLine, startCol){ var reader = this._reader, name = this.readName(first); return this.createToken(Tokens.HASH, name, startLine, startCol); }, htmlCommentStartToken: function(first, startLine, startCol){ var reader = this._reader, text = first; reader.mark(); text += reader.readCount(3); if (text == "<!--"){ return this.createToken(Tokens.CDO, text, startLine, startCol); } else { reader.reset(); return this.charToken(first, startLine, startCol); } }, htmlCommentEndToken: function(first, startLine, startCol){ var reader = this._reader, text = first; reader.mark(); text += reader.readCount(2); if (text == "-->"){ return this.createToken(Tokens.CDC, text, startLine, startCol); } else { reader.reset(); return this.charToken(first, startLine, startCol); } }, identOrFunctionToken: function(first, startLine, startCol){ var reader = this._reader, ident = this.readName(first), tt = Tokens.IDENT; if (reader.peek() == "("){ ident += reader.read(); if (ident.toLowerCase() == "url("){ tt = Tokens.URI; ident = 
this.readURI(ident); if (ident.toLowerCase() == "url("){ tt = Tokens.FUNCTION; } } else { tt = Tokens.FUNCTION; } } else if (reader.peek() == ":"){ //might be an IE function if (ident.toLowerCase() == "progid"){ ident += reader.readTo("("); tt = Tokens.IE_FUNCTION; } } return this.createToken(tt, ident, startLine, startCol); }, importantToken: function(first, startLine, startCol){ var reader = this._reader, important = first, tt = Tokens.CHAR, temp, c; reader.mark(); c = reader.read(); while(c){ if (c == "/"){ if (reader.peek() != "*"){ break; } else { temp = this.readComment(c); if (temp === ""){ //broken! break; } } } else if (isWhitespace(c)){ important += c + this.readWhitespace(); } else if (/i/i.test(c)){ temp = reader.readCount(8); if (/mportant/i.test(temp)){ important += c + temp; tt = Tokens.IMPORTANT_SYM; } break; //we're done } else { break; } c = reader.read(); } if (tt == Tokens.CHAR){ reader.reset(); return this.charToken(first, startLine, startCol); } else { return this.createToken(tt, important, startLine, startCol); } }, notToken: function(first, startLine, startCol){ var reader = this._reader, text = first; reader.mark(); text += reader.readCount(4); if (text.toLowerCase() == ":not("){ return this.createToken(Tokens.NOT, text, startLine, startCol); } else { reader.reset(); return this.charToken(first, startLine, startCol); } }, numberToken: function(first, startLine, startCol){ var reader = this._reader, value = this.readNumber(first), ident, tt = Tokens.NUMBER, c = reader.peek(); if (isIdentStart(c)){ ident = this.readName(reader.read()); value += ident; if (/^em$|^ex$|^px$|^gd$|^rem$|^vw$|^vh$|^vmax$|^vmin$|^ch$|^cm$|^mm$|^in$|^pt$|^pc$/i.test(ident)){ tt = Tokens.LENGTH; } else if (/^deg|^rad$|^grad$/i.test(ident)){ tt = Tokens.ANGLE; } else if (/^ms$|^s$/i.test(ident)){ tt = Tokens.TIME; } else if (/^hz$|^khz$/i.test(ident)){ tt = Tokens.FREQ; } else if (/^dpi$|^dpcm$/i.test(ident)){ tt = Tokens.RESOLUTION; } else { tt = Tokens.DIMENSION; } } 
else if (c == "%"){ value += reader.read(); tt = Tokens.PERCENTAGE; } return this.createToken(tt, value, startLine, startCol); }, stringToken: function(first, startLine, startCol){ var delim = first, string = first, reader = this._reader, prev = first, tt = Tokens.STRING, c = reader.read(); while(c){ string += c; if (c == delim && prev != "\\"){ break; } if (isNewLine(reader.peek()) && c != "\\"){ tt = Tokens.INVALID; break; } prev = c; c = reader.read(); } if (c === null){ tt = Tokens.INVALID; } return this.createToken(tt, string, startLine, startCol); }, unicodeRangeToken: function(first, startLine, startCol){ var reader = this._reader, value = first, temp, tt = Tokens.CHAR; if (reader.peek() == "+"){ reader.mark(); value += reader.read(); value += this.readUnicodeRangePart(true); if (value.length == 2){ reader.reset(); } else { tt = Tokens.UNICODE_RANGE; if (value.indexOf("?") == -1){ if (reader.peek() == "-"){ reader.mark(); temp = reader.read(); temp += this.readUnicodeRangePart(false); if (temp.length == 1){ reader.reset(); } else { value += temp; } } } } } return this.createToken(tt, value, startLine, startCol); }, whitespaceToken: function(first, startLine, startCol){ var reader = this._reader, value = first + this.readWhitespace(); return this.createToken(Tokens.S, value, startLine, startCol); }, readUnicodeRangePart: function(allowQuestionMark){ var reader = this._reader, part = "", c = reader.peek(); while(isHexDigit(c) && part.length < 6){ reader.read(); part += c; c = reader.peek(); } if (allowQuestionMark){ while(c == "?" 
&& part.length < 6){ reader.read(); part += c; c = reader.peek(); } } return part; }, readWhitespace: function(){ var reader = this._reader, whitespace = "", c = reader.peek(); while(isWhitespace(c)){ reader.read(); whitespace += c; c = reader.peek(); } return whitespace; }, readNumber: function(first){ var reader = this._reader, number = first, hasDot = (first == "."), c = reader.peek(); while(c){ if (isDigit(c)){ number += reader.read(); } else if (c == "."){ if (hasDot){ break; } else { hasDot = true; number += reader.read(); } } else { break; } c = reader.peek(); } return number; }, readString: function(){ var reader = this._reader, delim = reader.read(), string = delim, prev = delim, c = reader.peek(); while(c){ c = reader.read(); string += c; if (c == delim && prev != "\\"){ break; } if (isNewLine(reader.peek()) && c != "\\"){ string = ""; break; } prev = c; c = reader.peek(); } if (c === null){ string = ""; } return string; }, readURI: function(first){ var reader = this._reader, uri = first, inner = "", c = reader.peek(); reader.mark(); while(c && isWhitespace(c)){ reader.read(); c = reader.peek(); } if (c == "'" || c == "\""){ inner = this.readString(); } else { inner = this.readURL(); } c = reader.peek(); while(c && isWhitespace(c)){ reader.read(); c = reader.peek(); } if (inner === "" || c != ")"){ uri = first; reader.reset(); } else { uri += inner + reader.read(); } return uri; }, readURL: function(){ var reader = this._reader, url = "", c = reader.peek(); while (/^[!#$%&\\*-~]$/.test(c)){ url += reader.read(); c = reader.peek(); } return url; }, readName: function(first){ var reader = this._reader, ident = first || "", c = reader.peek(); while(true){ if (c == "\\"){ ident += this.readEscape(reader.read()); c = reader.peek(); } else if(c && isNameChar(c)){ ident += reader.read(); c = reader.peek(); } else { break; } } return ident; }, readEscape: function(first){ var reader = this._reader, cssEscape = first || "", i = 0, c = reader.peek(); if 
(isHexDigit(c)){ do { cssEscape += reader.read(); c = reader.peek(); } while(c && isHexDigit(c) && ++i < 6); } if (cssEscape.length == 3 && /\s/.test(c) || cssEscape.length == 7 || cssEscape.length == 1){ reader.read(); } else { c = ""; } return cssEscape + c; }, readComment: function(first){ var reader = this._reader, comment = first || "", c = reader.read(); if (c == "*"){ while(c){ comment += c; if (comment.length > 2 && c == "*" && reader.peek() == "/"){ comment += reader.read(); break; } c = reader.read(); } return comment; } else { return ""; } } }); var Tokens = [ { name: "CDO"}, { name: "CDC"}, { name: "S", whitespace: true/*, channel: "ws"*/}, { name: "COMMENT", comment: true, hide: true, channel: "comment" }, { name: "INCLUDES", text: "~="}, { name: "DASHMATCH", text: "|="}, { name: "PREFIXMATCH", text: "^="}, { name: "SUFFIXMATCH", text: "$="}, { name: "SUBSTRINGMATCH", text: "*="}, { name: "STRING"}, { name: "IDENT"}, { name: "HASH"}, { name: "IMPORT_SYM", text: "@import"}, { name: "PAGE_SYM", text: "@page"}, { name: "MEDIA_SYM", text: "@media"}, { name: "FONT_FACE_SYM", text: "@font-face"}, { name: "CHARSET_SYM", text: "@charset"}, { name: "NAMESPACE_SYM", text: "@namespace"}, { name: "VIEWPORT_SYM", text: ["@viewport", "@-ms-viewport"]}, { name: "UNKNOWN_SYM" }, { name: "KEYFRAMES_SYM", text: [ "@keyframes", "@-webkit-keyframes", "@-moz-keyframes", "@-o-keyframes" ] }, { name: "IMPORTANT_SYM"}, { name: "LENGTH"}, { name: "ANGLE"}, { name: "TIME"}, { name: "FREQ"}, { name: "DIMENSION"}, { name: "PERCENTAGE"}, { name: "NUMBER"}, { name: "URI"}, { name: "FUNCTION"}, { name: "UNICODE_RANGE"}, { name: "INVALID"}, { name: "PLUS", text: "+" }, { name: "GREATER", text: ">"}, { name: "COMMA", text: ","}, { name: "TILDE", text: "~"}, { name: "NOT"}, { name: "TOPLEFTCORNER_SYM", text: "@top-left-corner"}, { name: "TOPLEFT_SYM", text: "@top-left"}, { name: "TOPCENTER_SYM", text: "@top-center"}, { name: "TOPRIGHT_SYM", text: "@top-right"}, { name: 
"TOPRIGHTCORNER_SYM", text: "@top-right-corner"}, { name: "BOTTOMLEFTCORNER_SYM", text: "@bottom-left-corner"}, { name: "BOTTOMLEFT_SYM", text: "@bottom-left"}, { name: "BOTTOMCENTER_SYM", text: "@bottom-center"}, { name: "BOTTOMRIGHT_SYM", text: "@bottom-right"}, { name: "BOTTOMRIGHTCORNER_SYM", text: "@bottom-right-corner"}, { name: "LEFTTOP_SYM", text: "@left-top"}, { name: "LEFTMIDDLE_SYM", text: "@left-middle"}, { name: "LEFTBOTTOM_SYM", text: "@left-bottom"}, { name: "RIGHTTOP_SYM", text: "@right-top"}, { name: "RIGHTMIDDLE_SYM", text: "@right-middle"}, { name: "RIGHTBOTTOM_SYM", text: "@right-bottom"}, { name: "RESOLUTION", state: "media"}, { name: "IE_FUNCTION" }, { name: "CHAR" }, { name: "PIPE", text: "|" }, { name: "SLASH", text: "/" }, { name: "MINUS", text: "-" }, { name: "STAR", text: "*" }, { name: "LBRACE", endChar: "}", text: "{" }, { name: "RBRACE", text: "}" }, { name: "LBRACKET", endChar: "]", text: "[" }, { name: "RBRACKET", text: "]" }, { name: "EQUALS", text: "=" }, { name: "COLON", text: ":" }, { name: "SEMICOLON", text: ";" }, { name: "LPAREN", endChar: ")", text: "(" }, { name: "RPAREN", text: ")" }, { name: "DOT", text: "." 
} ]; (function(){ var nameMap = [], typeMap = {}; Tokens.UNKNOWN = -1; Tokens.unshift({name:"EOF"}); for (var i=0, len = Tokens.length; i < len; i++){ nameMap.push(Tokens[i].name); Tokens[Tokens[i].name] = i; if (Tokens[i].text){ if (Tokens[i].text instanceof Array){ for (var j=0; j < Tokens[i].text.length; j++){ typeMap[Tokens[i].text[j]] = i; } } else { typeMap[Tokens[i].text] = i; } } } Tokens.name = function(tt){ return nameMap[tt]; }; Tokens.type = function(c){ return typeMap[c] || -1; }; })(); var Validation = { validate: function(property, value){ var name = property.toString().toLowerCase(), parts = value.parts, expression = new PropertyValueIterator(value), spec = Properties[name], part, valid, j, count, msg, types, last, literals, max, multi, group; if (!spec) { if (name.indexOf("-") !== 0){ //vendor prefixed are ok throw new ValidationError("Unknown property '" + property + "'.", property.line, property.col); } } else if (typeof spec != "number"){ if (typeof spec == "string"){ if (spec.indexOf("||") > -1) { this.groupProperty(spec, expression); } else { this.singleProperty(spec, expression, 1); } } else if (spec.multi) { this.multiProperty(spec.multi, expression, spec.comma, spec.max || Infinity); } else if (typeof spec == "function") { spec(expression); } } }, singleProperty: function(types, expression, max, partial) { var result = false, value = expression.value, count = 0, part; while (expression.hasNext() && count < max) { result = ValidationTypes.isAny(expression, types); if (!result) { break; } count++; } if (!result) { if (expression.hasNext() && !expression.isFirst()) { part = expression.peek(); throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected (" + types + ") but found '" + value + "'.", value.line, value.col); } } else if (expression.hasNext()) { part = expression.next(); throw new ValidationError("Expected end of value but found '" + part + "'.", 
part.line, part.col); } }, multiProperty: function (types, expression, comma, max) { var result = false, value = expression.value, count = 0, sep = false, part; while(expression.hasNext() && !result && count < max) { if (ValidationTypes.isAny(expression, types)) { count++; if (!expression.hasNext()) { result = true; } else if (comma) { if (expression.peek() == ",") { part = expression.next(); } else { break; } } } else { break; } } if (!result) { if (expression.hasNext() && !expression.isFirst()) { part = expression.peek(); throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { part = expression.previous(); if (comma && part == ",") { throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected (" + types + ") but found '" + value + "'.", value.line, value.col); } } } else if (expression.hasNext()) { part = expression.next(); throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } }, groupProperty: function (types, expression, comma) { var result = false, value = expression.value, typeCount = types.split("||").length, groups = { count: 0 }, partial = false, name, part; while(expression.hasNext() && !result) { name = ValidationTypes.isAnyOfGroup(expression, types); if (name) { if (groups[name]) { break; } else { groups[name] = 1; groups.count++; partial = true; if (groups.count == typeCount || !expression.hasNext()) { result = true; } } } else { break; } } if (!result) { if (partial && expression.hasNext()) { part = expression.peek(); throw new ValidationError("Expected end of value but found '" + part + "'.", part.line, part.col); } else { throw new ValidationError("Expected (" + types + ") but found '" + value + "'.", value.line, value.col); } } else if (expression.hasNext()) { part = expression.next(); throw new ValidationError("Expected end of value but found '" + part + "'.", 
part.line, part.col); } } }; function ValidationError(message, line, col){ this.col = col; this.line = line; this.message = message; } ValidationError.prototype = new Error(); var ValidationTypes = { isLiteral: function (part, literals) { var text = part.text.toString().toLowerCase(), args = literals.split(" | "), i, len, found = false; for (i=0,len=args.length; i < len && !found; i++){ if (text == args[i].toLowerCase()){ found = true; } } return found; }, isSimple: function(type) { return !!this.simple[type]; }, isComplex: function(type) { return !!this.complex[type]; }, isAny: function (expression, types) { var args = types.split(" | "), i, len, found = false; for (i=0,len=args.length; i < len && !found && expression.hasNext(); i++){ found = this.isType(expression, args[i]); } return found; }, isAnyOfGroup: function(expression, types) { var args = types.split(" || "), i, len, found = false; for (i=0,len=args.length; i < len && !found; i++){ found = this.isType(expression, args[i]); } return found ? 
args[i-1] : false; }, isType: function (expression, type) { var part = expression.peek(), result = false; if (type.charAt(0) != "<") { result = this.isLiteral(part, type); if (result) { expression.next(); } } else if (this.simple[type]) { result = this.simple[type](part); if (result) { expression.next(); } } else { result = this.complex[type](expression); } return result; }, simple: { "<absolute-size>": function(part){ return ValidationTypes.isLiteral(part, "xx-small | x-small | small | medium | large | x-large | xx-large"); }, "<attachment>": function(part){ return ValidationTypes.isLiteral(part, "scroll | fixed | local"); }, "<attr>": function(part){ return part.type == "function" && part.name == "attr"; }, "<bg-image>": function(part){ return this["<image>"](part) || this["<gradient>"](part) || part == "none"; }, "<gradient>": function(part) { return part.type == "function" && /^(?:\-(?:ms|moz|o|webkit)\-)?(?:repeating\-)?(?:radial\-|linear\-)?gradient/i.test(part); }, "<box>": function(part){ return ValidationTypes.isLiteral(part, "padding-box | border-box | content-box"); }, "<content>": function(part){ return part.type == "function" && part.name == "content"; }, "<relative-size>": function(part){ return ValidationTypes.isLiteral(part, "smaller | larger"); }, "<ident>": function(part){ return part.type == "identifier"; }, "<length>": function(part){ if (part.type == "function" && /^(?:\-(?:ms|moz|o|webkit)\-)?calc/i.test(part)){ return true; }else{ return part.type == "length" || part.type == "number" || part.type == "integer" || part == "0"; } }, "<color>": function(part){ return part.type == "color" || part == "transparent"; }, "<number>": function(part){ return part.type == "number" || this["<integer>"](part); }, "<integer>": function(part){ return part.type == "integer"; }, "<line>": function(part){ return part.type == "integer"; }, "<angle>": function(part){ return part.type == "angle"; }, "<uri>": function(part){ return part.type == "uri"; }, "<image>": 
function(part){ return this["<uri>"](part); }, "<percentage>": function(part){ return part.type == "percentage" || part == "0"; }, "<border-width>": function(part){ return this["<length>"](part) || ValidationTypes.isLiteral(part, "thin | medium | thick"); }, "<border-style>": function(part){ return ValidationTypes.isLiteral(part, "none | hidden | dotted | dashed | solid | double | groove | ridge | inset | outset"); }, "<content-sizing>": function(part){ // http://www.w3.org/TR/css3-sizing/#width-height-keywords return ValidationTypes.isLiteral(part, "fill-available | -moz-available | -webkit-fill-available | max-content | -moz-max-content | -webkit-max-content | min-content | -moz-min-content | -webkit-min-content | fit-content | -moz-fit-content | -webkit-fit-content"); }, "<margin-width>": function(part){ return this["<length>"](part) || this["<percentage>"](part) || ValidationTypes.isLiteral(part, "auto"); }, "<padding-width>": function(part){ return this["<length>"](part) || this["<percentage>"](part); }, "<shape>": function(part){ return part.type == "function" && (part.name == "rect" || part.name == "inset-rect"); }, "<time>": function(part) { return part.type == "time"; }, "<flex-grow>": function(part){ return this["<number>"](part); }, "<flex-shrink>": function(part){ return this["<number>"](part); }, "<width>": function(part){ return this["<margin-width>"](part); }, "<flex-basis>": function(part){ return this["<width>"](part); }, "<flex-direction>": function(part){ return ValidationTypes.isLiteral(part, "row | row-reverse | column | column-reverse"); }, "<flex-wrap>": function(part){ return ValidationTypes.isLiteral(part, "nowrap | wrap | wrap-reverse"); } }, complex: { "<bg-position>": function(expression){ var types = this, result = false, numeric = "<percentage> | <length>", xDir = "left | right", yDir = "top | bottom", count = 0, hasNext = function() { return expression.hasNext() && expression.peek() != ","; }; while (expression.peek(count) && 
expression.peek(count) != ",") { count++; } if (count < 3) { if (ValidationTypes.isAny(expression, xDir + " | center | " + numeric)) { result = true; ValidationTypes.isAny(expression, yDir + " | center | " + numeric); } else if (ValidationTypes.isAny(expression, yDir)) { result = true; ValidationTypes.isAny(expression, xDir + " | center"); } } else { if (ValidationTypes.isAny(expression, xDir)) { if (ValidationTypes.isAny(expression, yDir)) { result = true; ValidationTypes.isAny(expression, numeric); } else if (ValidationTypes.isAny(expression, numeric)) { if (ValidationTypes.isAny(expression, yDir)) { result = true; ValidationTypes.isAny(expression, numeric); } else if (ValidationTypes.isAny(expression, "center")) { result = true; } } } else if (ValidationTypes.isAny(expression, yDir)) { if (ValidationTypes.isAny(expression, xDir)) { result = true; ValidationTypes.isAny(expression, numeric); } else if (ValidationTypes.isAny(expression, numeric)) { if (ValidationTypes.isAny(expression, xDir)) { result = true; ValidationTypes.isAny(expression, numeric); } else if (ValidationTypes.isAny(expression, "center")) { result = true; } } } else if (ValidationTypes.isAny(expression, "center")) { if (ValidationTypes.isAny(expression, xDir + " | " + yDir)) { result = true; ValidationTypes.isAny(expression, numeric); } } } return result; }, "<bg-size>": function(expression){ var types = this, result = false, numeric = "<percentage> | <length> | auto", part, i, len; if (ValidationTypes.isAny(expression, "cover | contain")) { result = true; } else if (ValidationTypes.isAny(expression, numeric)) { result = true; ValidationTypes.isAny(expression, numeric); } return result; }, "<repeat-style>": function(expression){ var result = false, values = "repeat | space | round | no-repeat", part; if (expression.hasNext()){ part = expression.next(); if (ValidationTypes.isLiteral(part, "repeat-x | repeat-y")) { result = true; } else if (ValidationTypes.isLiteral(part, values)) { result = true; 
if (expression.hasNext() && ValidationTypes.isLiteral(expression.peek(), values)) { expression.next(); } } } return result; }, "<shadow>": function(expression) { var result = false, count = 0, inset = false, color = false, part; if (expression.hasNext()) { if (ValidationTypes.isAny(expression, "inset")){ inset = true; } if (ValidationTypes.isAny(expression, "<color>")) { color = true; } while (ValidationTypes.isAny(expression, "<length>") && count < 4) { count++; } if (expression.hasNext()) { if (!color) { ValidationTypes.isAny(expression, "<color>"); } if (!inset) { ValidationTypes.isAny(expression, "inset"); } } result = (count >= 2 && count <= 4); } return result; }, "<x-one-radius>": function(expression) { var result = false, simple = "<length> | <percentage> | inherit"; if (ValidationTypes.isAny(expression, simple)){ result = true; ValidationTypes.isAny(expression, simple); } return result; }, "<flex>": function(expression) { var part, result = false; if (ValidationTypes.isAny(expression, "none | inherit")) { result = true; } else { if (ValidationTypes.isType(expression, "<flex-grow>")) { if (expression.peek()) { if (ValidationTypes.isType(expression, "<flex-shrink>")) { if (expression.peek()) { result = ValidationTypes.isType(expression, "<flex-basis>"); } else { result = true; } } else if (ValidationTypes.isType(expression, "<flex-basis>")) { result = expression.peek() === null; } } else { result = true; } } else if (ValidationTypes.isType(expression, "<flex-basis>")) { result = true; } } if (!result) { part = expression.peek(); throw new ValidationError("Expected (none | [ <flex-grow> <flex-shrink>? 
|| <flex-basis> ]) but found '" + expression.value.text + "'.", part.line, part.col); } return result; } } }; parserlib.css = { Colors :Colors, Combinator :Combinator, Parser :Parser, PropertyName :PropertyName, PropertyValue :PropertyValue, PropertyValuePart :PropertyValuePart, MediaFeature :MediaFeature, MediaQuery :MediaQuery, Selector :Selector, SelectorPart :SelectorPart, SelectorSubPart :SelectorSubPart, Specificity :Specificity, TokenStream :TokenStream, Tokens :Tokens, ValidationError :ValidationError }; })(); (function(){ for(var prop in parserlib){ exports[prop] = parserlib[prop]; } })(); function objectToString(o) { return Object.prototype.toString.call(o); } var util = { isArray: function (ar) { return Array.isArray(ar) || (typeof ar === 'object' && objectToString(ar) === '[object Array]'); }, isDate: function (d) { return typeof d === 'object' && objectToString(d) === '[object Date]'; }, isRegExp: function (re) { return typeof re === 'object' && objectToString(re) === '[object RegExp]'; }, getRegExpFlags: function (re) { var flags = ''; re.global && (flags += 'g'); re.ignoreCase && (flags += 'i'); re.multiline && (flags += 'm'); return flags; } }; if (typeof module === 'object') module.exports = clone; function clone(parent, circular, depth, prototype) { var allParents = []; var allChildren = []; var useBuffer = typeof Buffer != 'undefined'; if (typeof circular == 'undefined') circular = true; if (typeof depth == 'undefined') depth = Infinity; function _clone(parent, depth) { if (parent === null) return null; if (depth == 0) return parent; var child; if (typeof parent != 'object') { return parent; } if (util.isArray(parent)) { child = []; } else if (util.isRegExp(parent)) { child = new RegExp(parent.source, util.getRegExpFlags(parent)); if (parent.lastIndex) child.lastIndex = parent.lastIndex; } else if (util.isDate(parent)) { child = new Date(parent.getTime()); } else if (useBuffer && Buffer.isBuffer(parent)) { child = new Buffer(parent.length); 
parent.copy(child); return child; } else { if (typeof prototype == 'undefined') child = Object.create(Object.getPrototypeOf(parent)); else child = Object.create(prototype); } if (circular) { var index = allParents.indexOf(parent); if (index != -1) { return allChildren[index]; } allParents.push(parent); allChildren.push(child); } for (var i in parent) { child[i] = _clone(parent[i], depth - 1); } return child; } return _clone(parent, depth); } clone.clonePrototype = function(parent) { if (parent === null) return null; var c = function () {}; c.prototype = parent; return new c(); }; var CSSLint = (function(){ var rules = [], formatters = [], embeddedRuleset = /\/\*csslint([^\*]*)\*\//, api = new parserlib.util.EventTarget(); api.version = "@VERSION@"; api.addRule = function(rule){ rules.push(rule); rules[rule.id] = rule; }; api.clearRules = function(){ rules = []; }; api.getRules = function(){ return [].concat(rules).sort(function(a,b){ return a.id > b.id ? 1 : 0; }); }; api.getRuleset = function() { var ruleset = {}, i = 0, len = rules.length; while (i < len){ ruleset[rules[i++].id] = 1; //by default, everything is a warning } return ruleset; }; function applyEmbeddedRuleset(text, ruleset){ var valueMap, embedded = text && text.match(embeddedRuleset), rules = embedded && embedded[1]; if (rules) { valueMap = { "true": 2, // true is error "": 1, // blank is warning "false": 0, // false is ignore "2": 2, // explicit error "1": 1, // explicit warning "0": 0 // explicit ignore }; rules.toLowerCase().split(",").forEach(function(rule){ var pair = rule.split(":"), property = pair[0] || "", value = pair[1] || ""; ruleset[property.trim()] = valueMap[value.trim()]; }); } return ruleset; } api.addFormatter = function(formatter) { formatters[formatter.id] = formatter; }; api.getFormatter = function(formatId){ return formatters[formatId]; }; api.format = function(results, filename, formatId, options) { var formatter = this.getFormatter(formatId), result = null; if (formatter){ 
result = formatter.startFormat(); result += formatter.formatResults(results, filename, options || {}); result += formatter.endFormat(); } return result; }; api.hasFormat = function(formatId){ return formatters.hasOwnProperty(formatId); }; api.verify = function(text, ruleset){ var i = 0, reporter, lines, report, parser = new parserlib.css.Parser({ starHack: true, ieFilters: true, underscoreHack: true, strict: false }); lines = text.replace(/\n\r?/g, "$split$").split("$split$"); if (!ruleset){ ruleset = this.getRuleset(); } if (embeddedRuleset.test(text)){ ruleset = clone(ruleset); ruleset = applyEmbeddedRuleset(text, ruleset); } reporter = new Reporter(lines, ruleset); ruleset.errors = 2; //always report parsing errors as errors for (i in ruleset){ if(ruleset.hasOwnProperty(i) && ruleset[i]){ if (rules[i]){ rules[i].init(parser, reporter); } } } try { parser.parse(text); } catch (ex) { reporter.error("Fatal error, cannot continue: " + ex.message, ex.line, ex.col, {}); } report = { messages : reporter.messages, stats : reporter.stats, ruleset : reporter.ruleset }; report.messages.sort(function (a, b){ if (a.rollup && !b.rollup){ return 1; } else if (!a.rollup && b.rollup){ return -1; } else { return a.line - b.line; } }); return report; }; return api; })(); function Reporter(lines, ruleset){ this.messages = []; this.stats = []; this.lines = lines; this.ruleset = ruleset; } Reporter.prototype = { constructor: Reporter, error: function(message, line, col, rule){ this.messages.push({ type : "error", line : line, col : col, message : message, evidence: this.lines[line-1], rule : rule || {} }); }, warn: function(message, line, col, rule){ this.report(message, line, col, rule); }, report: function(message, line, col, rule){ this.messages.push({ type : this.ruleset[rule.id] === 2 ? 
"error" : "warning", line : line, col : col, message : message, evidence: this.lines[line-1], rule : rule }); }, info: function(message, line, col, rule){ this.messages.push({ type : "info", line : line, col : col, message : message, evidence: this.lines[line-1], rule : rule }); }, rollupError: function(message, rule){ this.messages.push({ type : "error", rollup : true, message : message, rule : rule }); }, rollupWarn: function(message, rule){ this.messages.push({ type : "warning", rollup : true, message : message, rule : rule }); }, stat: function(name, value){ this.stats[name] = value; } }; CSSLint._Reporter = Reporter; CSSLint.Util = { mix: function(receiver, supplier){ var prop; for (prop in supplier){ if (supplier.hasOwnProperty(prop)){ receiver[prop] = supplier[prop]; } } return prop; }, indexOf: function(values, value){ if (values.indexOf){ return values.indexOf(value); } else { for (var i=0, len=values.length; i < len; i++){ if (values[i] === value){ return i; } } return -1; } }, forEach: function(values, func) { if (values.forEach){ return values.forEach(func); } else { for (var i=0, len=values.length; i < len; i++){ func(values[i], i, values); } } } }; CSSLint.addRule({ id: "adjoining-classes", name: "Disallow adjoining classes", desc: "Don't use adjoining classes.", browsers: "IE6", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, modifier, classCount, i, j, k; for (i=0; i < selectors.length; i++){ selector = selectors[i]; for (j=0; j < selector.parts.length; j++){ part = selector.parts[j]; if (part.type === parser.SELECTOR_PART_TYPE){ classCount = 0; for (k=0; k < part.modifiers.length; k++){ modifier = part.modifiers[k]; if (modifier.type === "class"){ classCount++; } if (classCount > 1){ reporter.report("Don't use adjoining classes.", part.line, part.col, rule); } } } } } }); } }); CSSLint.addRule({ id: "box-model", name: "Beware of broken box size", 
desc: "Don't use width or height when using padding or border.", browsers: "All", init: function(parser, reporter){ var rule = this, widthProperties = { border: 1, "border-left": 1, "border-right": 1, padding: 1, "padding-left": 1, "padding-right": 1 }, heightProperties = { border: 1, "border-bottom": 1, "border-top": 1, padding: 1, "padding-bottom": 1, "padding-top": 1 }, properties, boxSizing = false; function startRule(){ properties = {}; boxSizing = false; } function endRule(){ var prop, value; if (!boxSizing) { if (properties.height){ for (prop in heightProperties){ if (heightProperties.hasOwnProperty(prop) && properties[prop]){ value = properties[prop].value; if (!(prop === "padding" && value.parts.length === 2 && value.parts[0].value === 0)){ reporter.report("Using height with " + prop + " can sometimes make elements larger than you expect.", properties[prop].line, properties[prop].col, rule); } } } } if (properties.width){ for (prop in widthProperties){ if (widthProperties.hasOwnProperty(prop) && properties[prop]){ value = properties[prop].value; if (!(prop === "padding" && value.parts.length === 2 && value.parts[1].value === 0)){ reporter.report("Using width with " + prop + " can sometimes make elements larger than you expect.", properties[prop].line, properties[prop].col, rule); } } } } } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var name = event.property.text.toLowerCase(); if (heightProperties[name] || widthProperties[name]){ if (!/^0\S*$/.test(event.value) && !(name === "border" && event.value.toString() === "none")){ properties[name] = { line: event.property.line, col: event.property.col, value: event.value }; } } else { if (/^(width|height)/i.test(name) && 
/^(length|percentage)/.test(event.value.parts[0].type)){ properties[name] = 1; } else if (name === "box-sizing") { boxSizing = true; } } }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); parser.addListener("endpage", endRule); parser.addListener("endpagemargin", endRule); parser.addListener("endkeyframerule", endRule); } }); CSSLint.addRule({ id: "box-sizing", name: "Disallow use of box-sizing", desc: "The box-sizing properties isn't supported in IE6 and IE7.", browsers: "IE6, IE7", tags: ["Compatibility"], init: function(parser, reporter){ var rule = this; parser.addListener("property", function(event){ var name = event.property.text.toLowerCase(); if (name === "box-sizing"){ reporter.report("The box-sizing property isn't supported in IE6 and IE7.", event.line, event.col, rule); } }); } }); CSSLint.addRule({ id: "bulletproof-font-face", name: "Use the bulletproof @font-face syntax", desc: "Use the bulletproof @font-face syntax to avoid 404's in old IE (http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax).", browsers: "All", init: function(parser, reporter){ var rule = this, fontFaceRule = false, firstSrc = true, ruleFailed = false, line, col; parser.addListener("startfontface", function(){ fontFaceRule = true; }); parser.addListener("property", function(event){ if (!fontFaceRule) { return; } var propertyName = event.property.toString().toLowerCase(), value = event.value.toString(); line = event.line; col = event.col; if (propertyName === "src") { var regex = /^\s?url\(['"].+\.eot\?.*['"]\)\s*format\(['"]embedded-opentype['"]\).*$/i; if (!value.match(regex) && firstSrc) { ruleFailed = true; firstSrc = false; } else if (value.match(regex) && !firstSrc) { ruleFailed = false; } } }); parser.addListener("endfontface", function(){ fontFaceRule = false; if (ruleFailed) { reporter.report("@font-face declaration doesn't follow the fontspring bulletproof syntax.", line, col, rule); } }); } }); CSSLint.addRule({ id: 
"compatible-vendor-prefixes", name: "Require compatible vendor prefixes", desc: "Include all compatible vendor prefixes to reach a wider range of users.", browsers: "All", init: function (parser, reporter) { var rule = this, compatiblePrefixes, properties, prop, variations, prefixed, i, len, inKeyFrame = false, arrayPush = Array.prototype.push, applyTo = []; compatiblePrefixes = { "animation" : "webkit moz", "animation-delay" : "webkit moz", "animation-direction" : "webkit moz", "animation-duration" : "webkit moz", "animation-fill-mode" : "webkit moz", "animation-iteration-count" : "webkit moz", "animation-name" : "webkit moz", "animation-play-state" : "webkit moz", "animation-timing-function" : "webkit moz", "appearance" : "webkit moz", "border-end" : "webkit moz", "border-end-color" : "webkit moz", "border-end-style" : "webkit moz", "border-end-width" : "webkit moz", "border-image" : "webkit moz o", "border-radius" : "webkit", "border-start" : "webkit moz", "border-start-color" : "webkit moz", "border-start-style" : "webkit moz", "border-start-width" : "webkit moz", "box-align" : "webkit moz ms", "box-direction" : "webkit moz ms", "box-flex" : "webkit moz ms", "box-lines" : "webkit ms", "box-ordinal-group" : "webkit moz ms", "box-orient" : "webkit moz ms", "box-pack" : "webkit moz ms", "box-sizing" : "webkit moz", "box-shadow" : "webkit moz", "column-count" : "webkit moz ms", "column-gap" : "webkit moz ms", "column-rule" : "webkit moz ms", "column-rule-color" : "webkit moz ms", "column-rule-style" : "webkit moz ms", "column-rule-width" : "webkit moz ms", "column-width" : "webkit moz ms", "hyphens" : "epub moz", "line-break" : "webkit ms", "margin-end" : "webkit moz", "margin-start" : "webkit moz", "marquee-speed" : "webkit wap", "marquee-style" : "webkit wap", "padding-end" : "webkit moz", "padding-start" : "webkit moz", "tab-size" : "moz o", "text-size-adjust" : "webkit ms", "transform" : "webkit moz ms o", "transform-origin" : "webkit moz ms o", "transition" : 
"webkit moz o", "transition-delay" : "webkit moz o", "transition-duration" : "webkit moz o", "transition-property" : "webkit moz o", "transition-timing-function" : "webkit moz o", "user-modify" : "webkit moz", "user-select" : "webkit moz ms", "word-break" : "epub ms", "writing-mode" : "epub ms" }; for (prop in compatiblePrefixes) { if (compatiblePrefixes.hasOwnProperty(prop)) { variations = []; prefixed = compatiblePrefixes[prop].split(" "); for (i = 0, len = prefixed.length; i < len; i++) { variations.push("-" + prefixed[i] + "-" + prop); } compatiblePrefixes[prop] = variations; arrayPush.apply(applyTo, variations); } } parser.addListener("startrule", function () { properties = []; }); parser.addListener("startkeyframes", function (event) { inKeyFrame = event.prefix || true; }); parser.addListener("endkeyframes", function () { inKeyFrame = false; }); parser.addListener("property", function (event) { var name = event.property; if (CSSLint.Util.indexOf(applyTo, name.text) > -1) { if (!inKeyFrame || typeof inKeyFrame !== "string" || name.text.indexOf("-" + inKeyFrame + "-") !== 0) { properties.push(name); } } }); parser.addListener("endrule", function () { if (!properties.length) { return; } var propertyGroups = {}, i, len, name, prop, variations, value, full, actual, item, propertiesSpecified; for (i = 0, len = properties.length; i < len; i++) { name = properties[i]; for (prop in compatiblePrefixes) { if (compatiblePrefixes.hasOwnProperty(prop)) { variations = compatiblePrefixes[prop]; if (CSSLint.Util.indexOf(variations, name.text) > -1) { if (!propertyGroups[prop]) { propertyGroups[prop] = { full : variations.slice(0), actual : [], actualNodes: [] }; } if (CSSLint.Util.indexOf(propertyGroups[prop].actual, name.text) === -1) { propertyGroups[prop].actual.push(name.text); propertyGroups[prop].actualNodes.push(name); } } } } } for (prop in propertyGroups) { if (propertyGroups.hasOwnProperty(prop)) { value = propertyGroups[prop]; full = value.full; actual = 
value.actual; if (full.length > actual.length) { for (i = 0, len = full.length; i < len; i++) { item = full[i]; if (CSSLint.Util.indexOf(actual, item) === -1) { propertiesSpecified = (actual.length === 1) ? actual[0] : (actual.length === 2) ? actual.join(" and ") : actual.join(", "); reporter.report("The property " + item + " is compatible with " + propertiesSpecified + " and should be included as well.", value.actualNodes[0].line, value.actualNodes[0].col, rule); } } } } } }); } }); CSSLint.addRule({ id: "display-property-grouping", name: "Require properties appropriate for display", desc: "Certain properties shouldn't be used with certain display property values.", browsers: "All", init: function(parser, reporter){ var rule = this; var propertiesToCheck = { display: 1, "float": "none", height: 1, width: 1, margin: 1, "margin-left": 1, "margin-right": 1, "margin-bottom": 1, "margin-top": 1, padding: 1, "padding-left": 1, "padding-right": 1, "padding-bottom": 1, "padding-top": 1, "vertical-align": 1 }, properties; function reportProperty(name, display, msg){ if (properties[name]){ if (typeof propertiesToCheck[name] !== "string" || properties[name].value.toLowerCase() !== propertiesToCheck[name]){ reporter.report(msg || name + " can't be used with display: " + display + ".", properties[name].line, properties[name].col, rule); } } } function startRule(){ properties = {}; } function endRule(){ var display = properties.display ? 
properties.display.value : null; if (display){ switch(display){ case "inline": reportProperty("height", display); reportProperty("width", display); reportProperty("margin", display); reportProperty("margin-top", display); reportProperty("margin-bottom", display); reportProperty("float", display, "display:inline has no effect on floated elements (but may be used to fix the IE6 double-margin bug)."); break; case "block": reportProperty("vertical-align", display); break; case "inline-block": reportProperty("float", display); break; default: if (display.indexOf("table-") === 0){ reportProperty("margin", display); reportProperty("margin-left", display); reportProperty("margin-right", display); reportProperty("margin-top", display); reportProperty("margin-bottom", display); reportProperty("float", display); } } } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startpage", startRule); parser.addListener("property", function(event){ var name = event.property.text.toLowerCase(); if (propertiesToCheck[name]){ properties[name] = { value: event.value.text, line: event.property.line, col: event.property.col }; } }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); parser.addListener("endkeyframerule", endRule); parser.addListener("endpagemargin", endRule); parser.addListener("endpage", endRule); } }); CSSLint.addRule({ id: "duplicate-background-images", name: "Disallow duplicate background images", desc: "Every background-image should be unique. Use a common class for e.g. 
sprites.", browsers: "All", init: function(parser, reporter){ var rule = this, stack = {}; parser.addListener("property", function(event){ var name = event.property.text, value = event.value, i, len; if (name.match(/background/i)) { for (i=0, len=value.parts.length; i < len; i++) { if (value.parts[i].type === "uri") { if (typeof stack[value.parts[i].uri] === "undefined") { stack[value.parts[i].uri] = event; } else { reporter.report("Background image '" + value.parts[i].uri + "' was used multiple times, first declared at line " + stack[value.parts[i].uri].line + ", col " + stack[value.parts[i].uri].col + ".", event.line, event.col, rule); } } } } }); } }); CSSLint.addRule({ id: "duplicate-properties", name: "Disallow duplicate properties", desc: "Duplicate properties must appear one after the other.", browsers: "All", init: function(parser, reporter){ var rule = this, properties, lastProperty; function startRule(){ properties = {}; } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var property = event.property, name = property.text.toLowerCase(); if (properties[name] && (lastProperty !== name || properties[name] === event.value.text)){ reporter.report("Duplicate property '" + event.property + "' found.", event.line, event.col, rule); } properties[name] = event.value.text; lastProperty = name; }); } }); CSSLint.addRule({ id: "empty-rules", name: "Disallow empty rules", desc: "Rules without any properties specified should be removed.", browsers: "All", init: function(parser, reporter){ var rule = this, count = 0; parser.addListener("startrule", function(){ count=0; }); parser.addListener("property", function(){ count++; }); parser.addListener("endrule", function(event){ var selectors = event.selectors; if (count === 0){ 
reporter.report("Rule is empty.", selectors[0].line, selectors[0].col, rule); } }); } }); CSSLint.addRule({ id: "errors", name: "Parsing Errors", desc: "This rule looks for recoverable syntax errors.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("error", function(event){ reporter.error(event.message, event.line, event.col, rule); }); } }); CSSLint.addRule({ id: "fallback-colors", name: "Require fallback colors", desc: "For older browsers that don't support RGBA, HSL, or HSLA, provide a fallback color.", browsers: "IE6,IE7,IE8", init: function(parser, reporter){ var rule = this, lastProperty, propertiesToCheck = { color: 1, background: 1, "border-color": 1, "border-top-color": 1, "border-right-color": 1, "border-bottom-color": 1, "border-left-color": 1, border: 1, "border-top": 1, "border-right": 1, "border-bottom": 1, "border-left": 1, "background-color": 1 }, properties; function startRule(){ properties = {}; lastProperty = null; } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var property = event.property, name = property.text.toLowerCase(), parts = event.value.parts, i = 0, colorType = "", len = parts.length; if(propertiesToCheck[name]){ while(i < len){ if (parts[i].type === "color"){ if ("alpha" in parts[i] || "hue" in parts[i]){ if (/([^\)]+)\(/.test(parts[i])){ colorType = RegExp.$1.toUpperCase(); } if (!lastProperty || (lastProperty.property.text.toLowerCase() !== name || lastProperty.colorType !== "compat")){ reporter.report("Fallback " + name + " (hex or RGB) should precede " + colorType + " " + name + ".", event.line, event.col, rule); } } else { event.colorType = "compat"; } } i++; } } lastProperty = event; }); } }); CSSLint.addRule({ id: "floats", name: "Disallow too many 
floats", desc: "This rule tests if the float property is used too many times", browsers: "All", init: function(parser, reporter){ var rule = this; var count = 0; parser.addListener("property", function(event){ if (event.property.text.toLowerCase() === "float" && event.value.text.toLowerCase() !== "none"){ count++; } }); parser.addListener("endstylesheet", function(){ reporter.stat("floats", count); if (count >= 10){ reporter.rollupWarn("Too many floats (" + count + "), you're probably using them for layout. Consider using a grid system instead.", rule); } }); } }); CSSLint.addRule({ id: "font-faces", name: "Don't use too many web fonts", desc: "Too many different web fonts in the same stylesheet.", browsers: "All", init: function(parser, reporter){ var rule = this, count = 0; parser.addListener("startfontface", function(){ count++; }); parser.addListener("endstylesheet", function(){ if (count > 5){ reporter.rollupWarn("Too many @font-face declarations (" + count + ").", rule); } }); } }); CSSLint.addRule({ id: "font-sizes", name: "Disallow too many font sizes", desc: "Checks the number of font-size declarations.", browsers: "All", init: function(parser, reporter){ var rule = this, count = 0; parser.addListener("property", function(event){ if (event.property.toString() === "font-size"){ count++; } }); parser.addListener("endstylesheet", function(){ reporter.stat("font-sizes", count); if (count >= 10){ reporter.rollupWarn("Too many font-size declarations (" + count + "), abstraction needed.", rule); } }); } }); CSSLint.addRule({ id: "gradients", name: "Require all gradient definitions", desc: "When using a vendor-prefixed gradient, make sure to use them all.", browsers: "All", init: function(parser, reporter){ var rule = this, gradients; parser.addListener("startrule", function(){ gradients = { moz: 0, webkit: 0, oldWebkit: 0, o: 0 }; }); parser.addListener("property", function(event){ if (/\-(moz|o|webkit)(?:\-(?:linear|radial))\-gradient/i.test(event.value)){ 
gradients[RegExp.$1] = 1; } else if (/\-webkit\-gradient/i.test(event.value)){ gradients.oldWebkit = 1; } }); parser.addListener("endrule", function(event){ var missing = []; if (!gradients.moz){ missing.push("Firefox 3.6+"); } if (!gradients.webkit){ missing.push("Webkit (Safari 5+, Chrome)"); } if (!gradients.oldWebkit){ missing.push("Old Webkit (Safari 4+, Chrome)"); } if (!gradients.o){ missing.push("Opera 11.1+"); } if (missing.length && missing.length < 4){ reporter.report("Missing vendor-prefixed CSS gradients for " + missing.join(", ") + ".", event.selectors[0].line, event.selectors[0].col, rule); } }); } }); CSSLint.addRule({ id: "ids", name: "Disallow IDs in selectors", desc: "Selectors should not contain IDs.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, modifier, idCount, i, j, k; for (i=0; i < selectors.length; i++){ selector = selectors[i]; idCount = 0; for (j=0; j < selector.parts.length; j++){ part = selector.parts[j]; if (part.type === parser.SELECTOR_PART_TYPE){ for (k=0; k < part.modifiers.length; k++){ modifier = part.modifiers[k]; if (modifier.type === "id"){ idCount++; } } } } if (idCount === 1){ reporter.report("Don't use IDs in selectors.", selector.line, selector.col, rule); } else if (idCount > 1){ reporter.report(idCount + " IDs in the selector, really?", selector.line, selector.col, rule); } } }); } }); CSSLint.addRule({ id: "import", name: "Disallow @import", desc: "Don't use @import, use <link> instead.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("import", function(event){ reporter.report("@import prevents parallel downloads, use <link> instead.", event.line, event.col, rule); }); } }); CSSLint.addRule({ id: "important", name: "Disallow !important", desc: "Be careful when using !important declaration", browsers: "All", init: function(parser, reporter){ var rule = this, 
count = 0; parser.addListener("property", function(event){ if (event.important === true){ count++; reporter.report("Use of !important", event.line, event.col, rule); } }); parser.addListener("endstylesheet", function(){ reporter.stat("important", count); if (count >= 10){ reporter.rollupWarn("Too many !important declarations (" + count + "), try to use less than 10 to avoid specificity issues.", rule); } }); } }); CSSLint.addRule({ id: "known-properties", name: "Require use of known properties", desc: "Properties should be known (listed in CSS3 specification) or be a vendor-prefixed property.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("property", function(event){ if (event.invalid) { reporter.report(event.invalid.message, event.line, event.col, rule); } }); } }); CSSLint.addRule({ id: "order-alphabetical", name: "Alphabetical order", desc: "Assure properties are in alphabetical order", browsers: "All", init: function(parser, reporter){ var rule = this, properties; var startRule = function () { properties = []; }; parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var name = event.property.text, lowerCasePrefixLessName = name.toLowerCase().replace(/^-.*?-/, ""); properties.push(lowerCasePrefixLessName); }); parser.addListener("endrule", function(event){ var currentProperties = properties.join(","), expectedProperties = properties.sort().join(","); if (currentProperties !== expectedProperties){ reporter.report("Rule doesn't have all its properties in alphabetical ordered.", event.line, event.col, rule); } }); } }); CSSLint.addRule({ id: "outline-none", name: "Disallow outline: none", desc: "Use of outline: none or outline: 0 should be limited to :focus rules.", browsers: "All", tags: 
["Accessibility"], init: function(parser, reporter){ var rule = this, lastRule; function startRule(event){ if (event.selectors){ lastRule = { line: event.line, col: event.col, selectors: event.selectors, propCount: 0, outline: false }; } else { lastRule = null; } } function endRule(){ if (lastRule){ if (lastRule.outline){ if (lastRule.selectors.toString().toLowerCase().indexOf(":focus") === -1){ reporter.report("Outlines should only be modified using :focus.", lastRule.line, lastRule.col, rule); } else if (lastRule.propCount === 1) { reporter.report("Outlines shouldn't be hidden unless other visual changes are made.", lastRule.line, lastRule.col, rule); } } } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var name = event.property.text.toLowerCase(), value = event.value; if (lastRule){ lastRule.propCount++; if (name === "outline" && (value.toString() === "none" || value.toString() === "0")){ lastRule.outline = true; } } }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); parser.addListener("endpage", endRule); parser.addListener("endpagemargin", endRule); parser.addListener("endkeyframerule", endRule); } }); CSSLint.addRule({ id: "overqualified-elements", name: "Disallow overqualified elements", desc: "Don't use classes or IDs with elements (a.foo or a#foo).", browsers: "All", init: function(parser, reporter){ var rule = this, classes = {}; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, modifier, i, j, k; for (i=0; i < selectors.length; i++){ selector = selectors[i]; for (j=0; j < selector.parts.length; j++){ part = selector.parts[j]; if (part.type === parser.SELECTOR_PART_TYPE){ for (k=0; k < part.modifiers.length; k++){ modifier = 
part.modifiers[k]; if (part.elementName && modifier.type === "id"){ reporter.report("Element (" + part + ") is overqualified, just use " + modifier + " without element name.", part.line, part.col, rule); } else if (modifier.type === "class"){ if (!classes[modifier]){ classes[modifier] = []; } classes[modifier].push({ modifier: modifier, part: part }); } } } } } }); parser.addListener("endstylesheet", function(){ var prop; for (prop in classes){ if (classes.hasOwnProperty(prop)){ if (classes[prop].length === 1 && classes[prop][0].part.elementName){ reporter.report("Element (" + classes[prop][0].part + ") is overqualified, just use " + classes[prop][0].modifier + " without element name.", classes[prop][0].part.line, classes[prop][0].part.col, rule); } } } }); } }); CSSLint.addRule({ id: "qualified-headings", name: "Disallow qualified headings", desc: "Headings should not be qualified (namespaced).", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, i, j; for (i=0; i < selectors.length; i++){ selector = selectors[i]; for (j=0; j < selector.parts.length; j++){ part = selector.parts[j]; if (part.type === parser.SELECTOR_PART_TYPE){ if (part.elementName && /h[1-6]/.test(part.elementName.toString()) && j > 0){ reporter.report("Heading (" + part.elementName + ") should not be qualified.", part.line, part.col, rule); } } } } }); } }); CSSLint.addRule({ id: "regex-selectors", name: "Disallow selectors that look like regexs", desc: "Selectors that look like regular expressions are slow and should be avoided.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, modifier, i, j, k; for (i=0; i < selectors.length; i++){ selector = selectors[i]; for (j=0; j < selector.parts.length; j++){ part = selector.parts[j]; if (part.type === 
parser.SELECTOR_PART_TYPE){ for (k=0; k < part.modifiers.length; k++){ modifier = part.modifiers[k]; if (modifier.type === "attribute"){ if (/([\~\|\^\$\*]=)/.test(modifier)){ reporter.report("Attribute selectors with " + RegExp.$1 + " are slow!", modifier.line, modifier.col, rule); } } } } } } }); } }); CSSLint.addRule({ id: "rules-count", name: "Rules Count", desc: "Track how many rules there are.", browsers: "All", init: function(parser, reporter){ var count = 0; parser.addListener("startrule", function(){ count++; }); parser.addListener("endstylesheet", function(){ reporter.stat("rule-count", count); }); } }); CSSLint.addRule({ id: "selector-max-approaching", name: "Warn when approaching the 4095 selector limit for IE", desc: "Will warn when selector count is >= 3800 selectors.", browsers: "IE", init: function(parser, reporter) { var rule = this, count = 0; parser.addListener("startrule", function(event) { count += event.selectors.length; }); parser.addListener("endstylesheet", function() { if (count >= 3800) { reporter.report("You have " + count + " selectors. Internet Explorer supports a maximum of 4095 selectors per stylesheet. Consider refactoring.",0,0,rule); } }); } }); CSSLint.addRule({ id: "selector-max", name: "Error when past the 4095 selector limit for IE", desc: "Will error when selector count is > 4095.", browsers: "IE", init: function(parser, reporter){ var rule = this, count = 0; parser.addListener("startrule", function(event) { count += event.selectors.length; }); parser.addListener("endstylesheet", function() { if (count > 4095) { reporter.report("You have " + count + " selectors. Internet Explorer supports a maximum of 4095 selectors per stylesheet. 
Consider refactoring.",0,0,rule); } }); } }); CSSLint.addRule({ id: "selector-newline", name: "Disallow new-line characters in selectors", desc: "New-line characters in selectors are usually a forgotten comma and not a descendant combinator.", browsers: "All", init: function(parser, reporter) { var rule = this; function startRule(event) { var i, len, selector, p, n, pLen, part, part2, type, currentLine, nextLine, selectors = event.selectors; for (i = 0, len = selectors.length; i < len; i++) { selector = selectors[i]; for (p = 0, pLen = selector.parts.length; p < pLen; p++) { for (n = p + 1; n < pLen; n++) { part = selector.parts[p]; part2 = selector.parts[n]; type = part.type; currentLine = part.line; nextLine = part2.line; if (type === "descendant" && nextLine > currentLine) { reporter.report("newline character found in selector (forgot a comma?)", currentLine, selectors[i].parts[0].col, rule); } } } } } parser.addListener("startrule", startRule); } }); CSSLint.addRule({ id: "shorthand", name: "Require shorthand properties", desc: "Use shorthand properties where possible.", browsers: "All", init: function(parser, reporter){ var rule = this, prop, i, len, propertiesToCheck = {}, properties, mapping = { "margin": [ "margin-top", "margin-bottom", "margin-left", "margin-right" ], "padding": [ "padding-top", "padding-bottom", "padding-left", "padding-right" ] }; for (prop in mapping){ if (mapping.hasOwnProperty(prop)){ for (i=0, len=mapping[prop].length; i < len; i++){ propertiesToCheck[mapping[prop][i]] = prop; } } } function startRule(){ properties = {}; } function endRule(event){ var prop, i, len, total; for (prop in mapping){ if (mapping.hasOwnProperty(prop)){ total=0; for (i=0, len=mapping[prop].length; i < len; i++){ total += properties[mapping[prop][i]] ? 
1 : 0; } if (total === mapping[prop].length){ reporter.report("The properties " + mapping[prop].join(", ") + " can be replaced by " + prop + ".", event.line, event.col, rule); } } } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("property", function(event){ var name = event.property.toString().toLowerCase(); if (propertiesToCheck[name]){ properties[name] = 1; } }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); } }); CSSLint.addRule({ id: "star-property-hack", name: "Disallow properties with a star prefix", desc: "Checks for the star property hack (targets IE6/7)", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("property", function(event){ var property = event.property; if (property.hack === "*") { reporter.report("Property with star prefix found.", event.property.line, event.property.col, rule); } }); } }); CSSLint.addRule({ id: "text-indent", name: "Disallow negative text-indent", desc: "Checks for text indent less than -99px", browsers: "All", init: function(parser, reporter){ var rule = this, textIndent, direction; function startRule(){ textIndent = false; direction = "inherit"; } function endRule(){ if (textIndent && direction !== "ltr"){ reporter.report("Negative text-indent doesn't work well with RTL. 
If you use text-indent for image replacement explicitly set direction for that item to ltr.", textIndent.line, textIndent.col, rule); } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("property", function(event){ var name = event.property.toString().toLowerCase(), value = event.value; if (name === "text-indent" && value.parts[0].value < -99){ textIndent = event.property; } else if (name === "direction" && value.toString() === "ltr"){ direction = "ltr"; } }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); } }); CSSLint.addRule({ id: "underscore-property-hack", name: "Disallow properties with an underscore prefix", desc: "Checks for the underscore property hack (targets IE6)", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("property", function(event){ var property = event.property; if (property.hack === "_") { reporter.report("Property with underscore prefix found.", event.property.line, event.property.col, rule); } }); } }); CSSLint.addRule({ id: "unique-headings", name: "Headings should only be defined once", desc: "Headings should be defined only once.", browsers: "All", init: function(parser, reporter){ var rule = this; var headings = { h1: 0, h2: 0, h3: 0, h4: 0, h5: 0, h6: 0 }; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, pseudo, i, j; for (i=0; i < selectors.length; i++){ selector = selectors[i]; part = selector.parts[selector.parts.length-1]; if (part.elementName && /(h[1-6])/i.test(part.elementName.toString())){ for (j=0; j < part.modifiers.length; j++){ if (part.modifiers[j].type === "pseudo"){ pseudo = true; break; } } if (!pseudo){ headings[RegExp.$1]++; if (headings[RegExp.$1] > 1) { reporter.report("Heading (" + part.elementName + ") has already been defined.", part.line, part.col, rule); } } } } }); parser.addListener("endstylesheet", function(){ var prop, 
messages = []; for (prop in headings){ if (headings.hasOwnProperty(prop)){ if (headings[prop] > 1){ messages.push(headings[prop] + " " + prop + "s"); } } } if (messages.length){ reporter.rollupWarn("You have " + messages.join(", ") + " defined in this stylesheet.", rule); } }); } }); CSSLint.addRule({ id: "universal-selector", name: "Disallow universal selector", desc: "The universal selector (*) is known to be slow.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, i; for (i=0; i < selectors.length; i++){ selector = selectors[i]; part = selector.parts[selector.parts.length-1]; if (part.elementName === "*"){ reporter.report(rule.desc, part.line, part.col, rule); } } }); } }); CSSLint.addRule({ id: "unqualified-attributes", name: "Disallow unqualified attribute selectors", desc: "Unqualified attribute selectors are known to be slow.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("startrule", function(event){ var selectors = event.selectors, selector, part, modifier, i, k; for (i=0; i < selectors.length; i++){ selector = selectors[i]; part = selector.parts[selector.parts.length-1]; if (part.type === parser.SELECTOR_PART_TYPE){ for (k=0; k < part.modifiers.length; k++){ modifier = part.modifiers[k]; if (modifier.type === "attribute" && (!part.elementName || part.elementName === "*")){ reporter.report(rule.desc, part.line, part.col, rule); } } } } }); } }); CSSLint.addRule({ id: "vendor-prefix", name: "Require standard property with vendor prefix", desc: "When using a vendor-prefixed property, make sure to include the standard one.", browsers: "All", init: function(parser, reporter){ var rule = this, properties, num, propertiesToCheck = { "-webkit-border-radius": "border-radius", "-webkit-border-top-left-radius": "border-top-left-radius", "-webkit-border-top-right-radius": "border-top-right-radius", 
"-webkit-border-bottom-left-radius": "border-bottom-left-radius", "-webkit-border-bottom-right-radius": "border-bottom-right-radius", "-o-border-radius": "border-radius", "-o-border-top-left-radius": "border-top-left-radius", "-o-border-top-right-radius": "border-top-right-radius", "-o-border-bottom-left-radius": "border-bottom-left-radius", "-o-border-bottom-right-radius": "border-bottom-right-radius", "-moz-border-radius": "border-radius", "-moz-border-radius-topleft": "border-top-left-radius", "-moz-border-radius-topright": "border-top-right-radius", "-moz-border-radius-bottomleft": "border-bottom-left-radius", "-moz-border-radius-bottomright": "border-bottom-right-radius", "-moz-column-count": "column-count", "-webkit-column-count": "column-count", "-moz-column-gap": "column-gap", "-webkit-column-gap": "column-gap", "-moz-column-rule": "column-rule", "-webkit-column-rule": "column-rule", "-moz-column-rule-style": "column-rule-style", "-webkit-column-rule-style": "column-rule-style", "-moz-column-rule-color": "column-rule-color", "-webkit-column-rule-color": "column-rule-color", "-moz-column-rule-width": "column-rule-width", "-webkit-column-rule-width": "column-rule-width", "-moz-column-width": "column-width", "-webkit-column-width": "column-width", "-webkit-column-span": "column-span", "-webkit-columns": "columns", "-moz-box-shadow": "box-shadow", "-webkit-box-shadow": "box-shadow", "-moz-transform" : "transform", "-webkit-transform" : "transform", "-o-transform" : "transform", "-ms-transform" : "transform", "-moz-transform-origin" : "transform-origin", "-webkit-transform-origin" : "transform-origin", "-o-transform-origin" : "transform-origin", "-ms-transform-origin" : "transform-origin", "-moz-box-sizing" : "box-sizing", "-webkit-box-sizing" : "box-sizing" }; function startRule(){ properties = {}; num = 1; } function endRule(){ var prop, i, len, needed, actual, needsStandard = []; for (prop in properties){ if (propertiesToCheck[prop]){ needsStandard.push({ 
actual: prop, needed: propertiesToCheck[prop]}); } } for (i=0, len=needsStandard.length; i < len; i++){ needed = needsStandard[i].needed; actual = needsStandard[i].actual; if (!properties[needed]){ reporter.report("Missing standard property '" + needed + "' to go along with '" + actual + "'.", properties[actual][0].name.line, properties[actual][0].name.col, rule); } else { if (properties[needed][0].pos < properties[actual][0].pos){ reporter.report("Standard property '" + needed + "' should come after vendor-prefixed property '" + actual + "'.", properties[actual][0].name.line, properties[actual][0].name.col, rule); } } } } parser.addListener("startrule", startRule); parser.addListener("startfontface", startRule); parser.addListener("startpage", startRule); parser.addListener("startpagemargin", startRule); parser.addListener("startkeyframerule", startRule); parser.addListener("property", function(event){ var name = event.property.text.toLowerCase(); if (!properties[name]){ properties[name] = []; } properties[name].push({ name: event.property, value : event.value, pos:num++ }); }); parser.addListener("endrule", endRule); parser.addListener("endfontface", endRule); parser.addListener("endpage", endRule); parser.addListener("endpagemargin", endRule); parser.addListener("endkeyframerule", endRule); } }); CSSLint.addRule({ id: "zero-units", name: "Disallow units for 0 values", desc: "You don't need to specify units when a value is 0.", browsers: "All", init: function(parser, reporter){ var rule = this; parser.addListener("property", function(event){ var parts = event.value.parts, i = 0, len = parts.length; while(i < len){ if ((parts[i].units || parts[i].type === "percentage") && parts[i].value === 0 && parts[i].type !== "time"){ reporter.report("Values of 0 shouldn't have units specified.", parts[i].line, parts[i].col, rule); } i++; } }); } }); (function() { var xmlEscape = function(str) { if (!str || str.constructor !== String) { return ""; } return 
str.replace(/[\"&><]/g, function(match) { switch (match) { case "\"": return "&quot;"; case "&": return "&amp;"; case "<": return "&lt;"; case ">": return "&gt;"; } }); }; CSSLint.addFormatter({ id: "checkstyle-xml", name: "Checkstyle XML format", startFormat: function(){ return "<?xml version=\"1.0\" encoding=\"utf-8\"?><checkstyle>"; }, endFormat: function(){ return "</checkstyle>"; }, readError: function(filename, message) { return "<file name=\"" + xmlEscape(filename) + "\"><error line=\"0\" column=\"0\" severty=\"error\" message=\"" + xmlEscape(message) + "\"></error></file>"; }, formatResults: function(results, filename/*, options*/) { var messages = results.messages, output = []; var generateSource = function(rule) { if (!rule || !("name" in rule)) { return ""; } return "net.csslint." + rule.name.replace(/\s/g,""); }; if (messages.length > 0) { output.push("<file name=\""+filename+"\">"); CSSLint.Util.forEach(messages, function (message) { if (!message.rollup) { output.push("<error line=\"" + message.line + "\" column=\"" + message.col + "\" severity=\"" + message.type + "\"" + " message=\"" + xmlEscape(message.message) + "\" source=\"" + generateSource(message.rule) +"\"/>"); } }); output.push("</file>"); } return output.join(""); } }); }()); CSSLint.addFormatter({ id: "compact", name: "Compact, 'porcelain' format", startFormat: function() { return ""; }, endFormat: function() { return ""; }, formatResults: function(results, filename, options) { var messages = results.messages, output = ""; options = options || {}; var capitalize = function(str) { return str.charAt(0).toUpperCase() + str.slice(1); }; if (messages.length === 0) { return options.quiet ? 
"" : filename + ": Lint Free!"; } CSSLint.Util.forEach(messages, function(message) { if (message.rollup) { output += filename + ": " + capitalize(message.type) + " - " + message.message + "\n"; } else { output += filename + ": " + "line " + message.line + ", col " + message.col + ", " + capitalize(message.type) + " - " + message.message + " (" + message.rule.id + ")\n"; } }); return output; } }); CSSLint.addFormatter({ id: "csslint-xml", name: "CSSLint XML format", startFormat: function(){ return "<?xml version=\"1.0\" encoding=\"utf-8\"?><csslint>"; }, endFormat: function(){ return "</csslint>"; }, formatResults: function(results, filename/*, options*/) { var messages = results.messages, output = []; var escapeSpecialCharacters = function(str) { if (!str || str.constructor !== String) { return ""; } return str.replace(/\"/g, "'").replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;"); }; if (messages.length > 0) { output.push("<file name=\""+filename+"\">"); CSSLint.Util.forEach(messages, function (message) { if (message.rollup) { output.push("<issue severity=\"" + message.type + "\" reason=\"" + escapeSpecialCharacters(message.message) + "\" evidence=\"" + escapeSpecialCharacters(message.evidence) + "\"/>"); } else { output.push("<issue line=\"" + message.line + "\" char=\"" + message.col + "\" severity=\"" + message.type + "\"" + " reason=\"" + escapeSpecialCharacters(message.message) + "\" evidence=\"" + escapeSpecialCharacters(message.evidence) + "\"/>"); } }); output.push("</file>"); } return output.join(""); } }); CSSLint.addFormatter({ id: "junit-xml", name: "JUNIT XML format", startFormat: function(){ return "<?xml version=\"1.0\" encoding=\"utf-8\"?><testsuites>"; }, endFormat: function() { return "</testsuites>"; }, formatResults: function(results, filename/*, options*/) { var messages = results.messages, output = [], tests = { "error": 0, "failure": 0 }; var generateSource = function(rule) { if (!rule || !("name" in rule)) { return ""; } 
return "net.csslint." + rule.name.replace(/\s/g,""); }; var escapeSpecialCharacters = function(str) { if (!str || str.constructor !== String) { return ""; } return str.replace(/\"/g, "'").replace(/</g, "&lt;").replace(/>/g, "&gt;"); }; if (messages.length > 0) { messages.forEach(function (message) { var type = message.type === "warning" ? "error" : message.type; if (!message.rollup) { output.push("<testcase time=\"0\" name=\"" + generateSource(message.rule) + "\">"); output.push("<" + type + " message=\"" + escapeSpecialCharacters(message.message) + "\"><![CDATA[" + message.line + ":" + message.col + ":" + escapeSpecialCharacters(message.evidence) + "]]></" + type + ">"); output.push("</testcase>"); tests[type] += 1; } }); output.unshift("<testsuite time=\"0\" tests=\"" + messages.length + "\" skipped=\"0\" errors=\"" + tests.error + "\" failures=\"" + tests.failure + "\" package=\"net.csslint\" name=\"" + filename + "\">"); output.push("</testsuite>"); } return output.join(""); } }); CSSLint.addFormatter({ id: "lint-xml", name: "Lint XML format", startFormat: function(){ return "<?xml version=\"1.0\" encoding=\"utf-8\"?><lint>"; }, endFormat: function(){ return "</lint>"; }, formatResults: function(results, filename/*, options*/) { var messages = results.messages, output = []; var escapeSpecialCharacters = function(str) { if (!str || str.constructor !== String) { return ""; } return str.replace(/\"/g, "'").replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;"); }; if (messages.length > 0) { output.push("<file name=\""+filename+"\">"); CSSLint.Util.forEach(messages, function (message) { if (message.rollup) { output.push("<issue severity=\"" + message.type + "\" reason=\"" + escapeSpecialCharacters(message.message) + "\" evidence=\"" + escapeSpecialCharacters(message.evidence) + "\"/>"); } else { output.push("<issue line=\"" + message.line + "\" char=\"" + message.col + "\" severity=\"" + message.type + "\"" + " reason=\"" + 
escapeSpecialCharacters(message.message) + "\" evidence=\"" + escapeSpecialCharacters(message.evidence) + "\"/>"); } }); output.push("</file>"); } return output.join(""); } }); CSSLint.addFormatter({ id: "text", name: "Plain Text", startFormat: function() { return ""; }, endFormat: function() { return ""; }, formatResults: function(results, filename, options) { var messages = results.messages, output = ""; options = options || {}; if (messages.length === 0) { return options.quiet ? "" : "\n\ncsslint: No errors in " + filename + "."; } output = "\n\ncsslint: There "; if (messages.length === 1) { output += "is 1 problem"; } else { output += "are " + messages.length + " problems"; } output += " in " + filename + "."; var pos = filename.lastIndexOf("/"), shortFilename = filename; if (pos === -1){ pos = filename.lastIndexOf("\\"); } if (pos > -1){ shortFilename = filename.substring(pos+1); } CSSLint.Util.forEach(messages, function (message, i) { output = output + "\n\n" + shortFilename; if (message.rollup) { output += "\n" + (i+1) + ": " + message.type; output += "\n" + message.message; } else { output += "\n" + (i+1) + ": " + message.type + " at line " + message.line + ", col " + message.col; output += "\n" + message.message; output += "\n" + message.evidence; } }); return output; } }); module.exports.CSSLint = CSSLint; }); define("ace/mode/css_worker",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/worker/mirror","ace/mode/css/csslint"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var lang = require("../lib/lang"); var Mirror = require("../worker/mirror").Mirror; var CSSLint = require("./css/csslint").CSSLint; var Worker = exports.Worker = function(sender) { Mirror.call(this, sender); this.setTimeout(400); this.ruleset = null; this.setDisabledRules("ids|order-alphabetical"); this.setInfoRules( "adjoining-classes|qualified-headings|zero-units|gradients|" + "import|outline-none|vendor-prefix" ); }; 
oop.inherits(Worker, Mirror); (function() { this.setInfoRules = function(ruleNames) { if (typeof ruleNames == "string") ruleNames = ruleNames.split("|"); this.infoRules = lang.arrayToMap(ruleNames); this.doc.getValue() && this.deferredUpdate.schedule(100); }; this.setDisabledRules = function(ruleNames) { if (!ruleNames) { this.ruleset = null; } else { if (typeof ruleNames == "string") ruleNames = ruleNames.split("|"); var all = {}; CSSLint.getRules().forEach(function(x){ all[x.id] = true; }); ruleNames.forEach(function(x) { delete all[x]; }); this.ruleset = all; } this.doc.getValue() && this.deferredUpdate.schedule(100); }; this.onUpdate = function() { var value = this.doc.getValue(); if (!value) return this.sender.emit("annotate", []); var infoRules = this.infoRules; var result = CSSLint.verify(value, this.ruleset); this.sender.emit("annotate", result.messages.map(function(msg) { return { row: msg.line - 1, column: msg.col - 1, text: msg.message, type: infoRules[msg.rule.id] ? "info" : msg.type, rule: msg.rule.name } })); }; }).call(Worker.prototype); }); define("ace/lib/es5-shim",["require","exports","module"], function(require, exports, module) { function Empty() {} if (!Function.prototype.bind) { Function.prototype.bind = function bind(that) { // .length is 1 var target = this; if (typeof target != "function") { throw new TypeError("Function.prototype.bind called on incompatible " + target); } var args = slice.call(arguments, 1); // for normal call var bound = function () { if (this instanceof bound) { var result = target.apply( this, args.concat(slice.call(arguments)) ); if (Object(result) === result) { return result; } return this; } else { return target.apply( that, args.concat(slice.call(arguments)) ); } }; if(target.prototype) { Empty.prototype = target.prototype; bound.prototype = new Empty(); Empty.prototype = null; } return bound; }; } var call = Function.prototype.call; var prototypeOfArray = Array.prototype; var prototypeOfObject = Object.prototype; 
var slice = prototypeOfArray.slice; var _toString = call.bind(prototypeOfObject.toString); var owns = call.bind(prototypeOfObject.hasOwnProperty); var defineGetter; var defineSetter; var lookupGetter; var lookupSetter; var supportsAccessors; if ((supportsAccessors = owns(prototypeOfObject, "__defineGetter__"))) { defineGetter = call.bind(prototypeOfObject.__defineGetter__); defineSetter = call.bind(prototypeOfObject.__defineSetter__); lookupGetter = call.bind(prototypeOfObject.__lookupGetter__); lookupSetter = call.bind(prototypeOfObject.__lookupSetter__); } if ([1,2].splice(0).length != 2) { if(function() { // test IE < 9 to splice bug - see issue #138 function makeArray(l) { var a = new Array(l+2); a[0] = a[1] = 0; return a; } var array = [], lengthBefore; array.splice.apply(array, makeArray(20)); array.splice.apply(array, makeArray(26)); lengthBefore = array.length; //46 array.splice(5, 0, "XXX"); // add one element lengthBefore + 1 == array.length if (lengthBefore + 1 == array.length) { return true;// has right splice implementation without bugs } }()) {//IE 6/7 var array_splice = Array.prototype.splice; Array.prototype.splice = function(start, deleteCount) { if (!arguments.length) { return []; } else { return array_splice.apply(this, [ start === void 0 ? 0 : start, deleteCount === void 0 ? 
(this.length - start) : deleteCount ].concat(slice.call(arguments, 2))) } }; } else {//IE8 Array.prototype.splice = function(pos, removeCount){ var length = this.length; if (pos > 0) { if (pos > length) pos = length; } else if (pos == void 0) { pos = 0; } else if (pos < 0) { pos = Math.max(length + pos, 0); } if (!(pos+removeCount < length)) removeCount = length - pos; var removed = this.slice(pos, pos+removeCount); var insert = slice.call(arguments, 2); var add = insert.length; if (pos === length) { if (add) { this.push.apply(this, insert); } } else { var remove = Math.min(removeCount, length - pos); var tailOldPos = pos + remove; var tailNewPos = tailOldPos + add - remove; var tailCount = length - tailOldPos; var lengthAfterRemove = length - remove; if (tailNewPos < tailOldPos) { // case A for (var i = 0; i < tailCount; ++i) { this[tailNewPos+i] = this[tailOldPos+i]; } } else if (tailNewPos > tailOldPos) { // case B for (i = tailCount; i--; ) { this[tailNewPos+i] = this[tailOldPos+i]; } } // else, add == remove (nothing to do) if (add && pos === lengthAfterRemove) { this.length = lengthAfterRemove; // truncate array this.push.apply(this, insert); } else { this.length = lengthAfterRemove + add; // reserves space for (i = 0; i < add; ++i) { this[pos+i] = insert[i]; } } } return removed; }; } } if (!Array.isArray) { Array.isArray = function isArray(obj) { return _toString(obj) == "[object Array]"; }; } var boxedString = Object("a"), splitString = boxedString[0] != "a" || !(0 in boxedString); if (!Array.prototype.forEach) { Array.prototype.forEach = function forEach(fun /*, thisp*/) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? 
this.split("") : object, thisp = arguments[1], i = -1, length = self.length >>> 0; if (_toString(fun) != "[object Function]") { throw new TypeError(); // TODO message } while (++i < length) { if (i in self) { fun.call(thisp, self[i], i, object); } } }; } if (!Array.prototype.map) { Array.prototype.map = function map(fun /*, thisp*/) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? this.split("") : object, length = self.length >>> 0, result = Array(length), thisp = arguments[1]; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } for (var i = 0; i < length; i++) { if (i in self) result[i] = fun.call(thisp, self[i], i, object); } return result; }; } if (!Array.prototype.filter) { Array.prototype.filter = function filter(fun /*, thisp */) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? this.split("") : object, length = self.length >>> 0, result = [], value, thisp = arguments[1]; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } for (var i = 0; i < length; i++) { if (i in self) { value = self[i]; if (fun.call(thisp, value, i, object)) { result.push(value); } } } return result; }; } if (!Array.prototype.every) { Array.prototype.every = function every(fun /*, thisp */) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? this.split("") : object, length = self.length >>> 0, thisp = arguments[1]; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } for (var i = 0; i < length; i++) { if (i in self && !fun.call(thisp, self[i], i, object)) { return false; } } return true; }; } if (!Array.prototype.some) { Array.prototype.some = function some(fun /*, thisp */) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? 
this.split("") : object, length = self.length >>> 0, thisp = arguments[1]; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } for (var i = 0; i < length; i++) { if (i in self && fun.call(thisp, self[i], i, object)) { return true; } } return false; }; } if (!Array.prototype.reduce) { Array.prototype.reduce = function reduce(fun /*, initial*/) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? this.split("") : object, length = self.length >>> 0; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } if (!length && arguments.length == 1) { throw new TypeError("reduce of empty array with no initial value"); } var i = 0; var result; if (arguments.length >= 2) { result = arguments[1]; } else { do { if (i in self) { result = self[i++]; break; } if (++i >= length) { throw new TypeError("reduce of empty array with no initial value"); } } while (true); } for (; i < length; i++) { if (i in self) { result = fun.call(void 0, result, self[i], i, object); } } return result; }; } if (!Array.prototype.reduceRight) { Array.prototype.reduceRight = function reduceRight(fun /*, initial*/) { var object = toObject(this), self = splitString && _toString(this) == "[object String]" ? 
this.split("") : object, length = self.length >>> 0; if (_toString(fun) != "[object Function]") { throw new TypeError(fun + " is not a function"); } if (!length && arguments.length == 1) { throw new TypeError("reduceRight of empty array with no initial value"); } var result, i = length - 1; if (arguments.length >= 2) { result = arguments[1]; } else { do { if (i in self) { result = self[i--]; break; } if (--i < 0) { throw new TypeError("reduceRight of empty array with no initial value"); } } while (true); } do { if (i in this) { result = fun.call(void 0, result, self[i], i, object); } } while (i--); return result; }; } if (!Array.prototype.indexOf || ([0, 1].indexOf(1, 2) != -1)) { Array.prototype.indexOf = function indexOf(sought /*, fromIndex */ ) { var self = splitString && _toString(this) == "[object String]" ? this.split("") : toObject(this), length = self.length >>> 0; if (!length) { return -1; } var i = 0; if (arguments.length > 1) { i = toInteger(arguments[1]); } i = i >= 0 ? i : Math.max(0, length + i); for (; i < length; i++) { if (i in self && self[i] === sought) { return i; } } return -1; }; } if (!Array.prototype.lastIndexOf || ([0, 1].lastIndexOf(0, -3) != -1)) { Array.prototype.lastIndexOf = function lastIndexOf(sought /*, fromIndex */) { var self = splitString && _toString(this) == "[object String]" ? this.split("") : toObject(this), length = self.length >>> 0; if (!length) { return -1; } var i = length - 1; if (arguments.length > 1) { i = Math.min(i, toInteger(arguments[1])); } i = i >= 0 ? i : length - Math.abs(i); for (; i >= 0; i--) { if (i in self && sought === self[i]) { return i; } } return -1; }; } if (!Object.getPrototypeOf) { Object.getPrototypeOf = function getPrototypeOf(object) { return object.__proto__ || ( object.constructor ? 
object.constructor.prototype : prototypeOfObject ); }; } if (!Object.getOwnPropertyDescriptor) { var ERR_NON_OBJECT = "Object.getOwnPropertyDescriptor called on a " + "non-object: "; Object.getOwnPropertyDescriptor = function getOwnPropertyDescriptor(object, property) { if ((typeof object != "object" && typeof object != "function") || object === null) throw new TypeError(ERR_NON_OBJECT + object); if (!owns(object, property)) return; var descriptor, getter, setter; descriptor = { enumerable: true, configurable: true }; if (supportsAccessors) { var prototype = object.__proto__; object.__proto__ = prototypeOfObject; var getter = lookupGetter(object, property); var setter = lookupSetter(object, property); object.__proto__ = prototype; if (getter || setter) { if (getter) descriptor.get = getter; if (setter) descriptor.set = setter; return descriptor; } } descriptor.value = object[property]; return descriptor; }; } if (!Object.getOwnPropertyNames) { Object.getOwnPropertyNames = function getOwnPropertyNames(object) { return Object.keys(object); }; } if (!Object.create) { var createEmpty; if (Object.prototype.__proto__ === null) { createEmpty = function () { return { "__proto__": null }; }; } else { createEmpty = function () { var empty = {}; for (var i in empty) empty[i] = null; empty.constructor = empty.hasOwnProperty = empty.propertyIsEnumerable = empty.isPrototypeOf = empty.toLocaleString = empty.toString = empty.valueOf = empty.__proto__ = null; return empty; } } Object.create = function create(prototype, properties) { var object; if (prototype === null) { object = createEmpty(); } else { if (typeof prototype != "object") throw new TypeError("typeof prototype["+(typeof prototype)+"] != 'object'"); var Type = function () {}; Type.prototype = prototype; object = new Type(); object.__proto__ = prototype; } if (properties !== void 0) Object.defineProperties(object, properties); return object; }; } function doesDefinePropertyWork(object) { try { 
Object.defineProperty(object, "sentinel", {}); return "sentinel" in object; } catch (exception) { } } if (Object.defineProperty) { var definePropertyWorksOnObject = doesDefinePropertyWork({}); var definePropertyWorksOnDom = typeof document == "undefined" || doesDefinePropertyWork(document.createElement("div")); if (!definePropertyWorksOnObject || !definePropertyWorksOnDom) { var definePropertyFallback = Object.defineProperty; } } if (!Object.defineProperty || definePropertyFallback) { var ERR_NON_OBJECT_DESCRIPTOR = "Property description must be an object: "; var ERR_NON_OBJECT_TARGET = "Object.defineProperty called on non-object: " var ERR_ACCESSORS_NOT_SUPPORTED = "getters & setters can not be defined " + "on this javascript engine"; Object.defineProperty = function defineProperty(object, property, descriptor) { if ((typeof object != "object" && typeof object != "function") || object === null) throw new TypeError(ERR_NON_OBJECT_TARGET + object); if ((typeof descriptor != "object" && typeof descriptor != "function") || descriptor === null) throw new TypeError(ERR_NON_OBJECT_DESCRIPTOR + descriptor); if (definePropertyFallback) { try { return definePropertyFallback.call(Object, object, property, descriptor); } catch (exception) { } } if (owns(descriptor, "value")) { if (supportsAccessors && (lookupGetter(object, property) || lookupSetter(object, property))) { var prototype = object.__proto__; object.__proto__ = prototypeOfObject; delete object[property]; object[property] = descriptor.value; object.__proto__ = prototype; } else { object[property] = descriptor.value; } } else { if (!supportsAccessors) throw new TypeError(ERR_ACCESSORS_NOT_SUPPORTED); if (owns(descriptor, "get")) defineGetter(object, property, descriptor.get); if (owns(descriptor, "set")) defineSetter(object, property, descriptor.set); } return object; }; } if (!Object.defineProperties) { Object.defineProperties = function defineProperties(object, properties) { for (var property in properties) { if 
(owns(properties, property)) Object.defineProperty(object, property, properties[property]); } return object; }; } if (!Object.seal) { Object.seal = function seal(object) { return object; }; } if (!Object.freeze) { Object.freeze = function freeze(object) { return object; }; } try { Object.freeze(function () {}); } catch (exception) { Object.freeze = (function freeze(freezeObject) { return function freeze(object) { if (typeof object == "function") { return object; } else { return freezeObject(object); } }; })(Object.freeze); } if (!Object.preventExtensions) { Object.preventExtensions = function preventExtensions(object) { return object; }; } if (!Object.isSealed) { Object.isSealed = function isSealed(object) { return false; }; } if (!Object.isFrozen) { Object.isFrozen = function isFrozen(object) { return false; }; } if (!Object.isExtensible) { Object.isExtensible = function isExtensible(object) { if (Object(object) === object) { throw new TypeError(); // TODO message } var name = ''; while (owns(object, name)) { name += '?'; } object[name] = true; var returnValue = owns(object, name); delete object[name]; return returnValue; }; } if (!Object.keys) { var hasDontEnumBug = true, dontEnums = [ "toString", "toLocaleString", "valueOf", "hasOwnProperty", "isPrototypeOf", "propertyIsEnumerable", "constructor" ], dontEnumsLength = dontEnums.length; for (var key in {"toString": null}) { hasDontEnumBug = false; } Object.keys = function keys(object) { if ( (typeof object != "object" && typeof object != "function") || object === null ) { throw new TypeError("Object.keys called on a non-object"); } var keys = []; for (var name in object) { if (owns(object, name)) { keys.push(name); } } if (hasDontEnumBug) { for (var i = 0, ii = dontEnumsLength; i < ii; i++) { var dontEnum = dontEnums[i]; if (owns(object, dontEnum)) { keys.push(dontEnum); } } } return keys; }; } if (!Date.now) { Date.now = function now() { return new Date().getTime(); }; } var ws = 
"\x09\x0A\x0B\x0C\x0D\x20\xA0\u1680\u180E\u2000\u2001\u2002\u2003" + "\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000\u2028" + "\u2029\uFEFF"; if (!String.prototype.trim || ws.trim()) { ws = "[" + ws + "]"; var trimBeginRegexp = new RegExp("^" + ws + ws + "*"), trimEndRegexp = new RegExp(ws + ws + "*$"); String.prototype.trim = function trim() { return String(this).replace(trimBeginRegexp, "").replace(trimEndRegexp, ""); }; } function toInteger(n) { n = +n; if (n !== n) { // isNaN n = 0; } else if (n !== 0 && n !== (1/0) && n !== -(1/0)) { n = (n > 0 || -1) * Math.floor(Math.abs(n)); } return n; } function isPrimitive(input) { var type = typeof input; return ( input === null || type === "undefined" || type === "boolean" || type === "number" || type === "string" ); } function toPrimitive(input) { var val, valueOf, toString; if (isPrimitive(input)) { return input; } valueOf = input.valueOf; if (typeof valueOf === "function") { val = valueOf.call(input); if (isPrimitive(val)) { return val; } } toString = input.toString; if (typeof toString === "function") { val = toString.call(input); if (isPrimitive(val)) { return val; } } throw new TypeError(); } var toObject = function (o) { if (o == null) { // this matches both null and undefined throw new TypeError("can't convert "+o+" to object"); } return Object(o); }; });
nikste/visualizationDemo
zeppelin-web/bower_components/ace-builds/src/worker-css.js
JavaScript
apache-2.0
292,988
/* ** Trace recorder (bytecode -> SSA IR). ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_RECORD_H #define _LJ_RECORD_H #include "lj_obj.h" #include "lj_jit.h" #if LJ_HASJIT /* Context for recording an indexed load/store. */ typedef struct RecordIndex { TValue tabv; /* Runtime value of table (or indexed object). */ TValue keyv; /* Runtime value of key. */ TValue valv; /* Runtime value of stored value. */ TValue mobjv; /* Runtime value of metamethod object. */ GCtab *mtv; /* Runtime value of metatable object. */ cTValue *oldv; /* Runtime value of previously stored value. */ TRef tab; /* Table (or indexed object) reference. */ TRef key; /* Key reference. */ TRef val; /* Value reference for a store or 0 for a load. */ TRef mt; /* Metatable reference. */ TRef mobj; /* Metamethod object reference. */ int idxchain; /* Index indirections left or 0 for raw lookup. */ } RecordIndex; LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv); LJ_FUNC void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk); LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o); LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs); LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs); LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults); LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm); LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix); LJ_FUNC void lj_record_ins(jit_State *J); LJ_FUNC void lj_record_setup(jit_State *J); #endif #endif
EliHar/Pattern_recognition
torch1/exe/luajit-rocks/luajit-2.1/src/lj_record.h
C
mit
1,654
// Type definitions for Browserify // Project: http://browserify.org/ // Definitions by: Andrew Gaspar <https://github.com/AndrewGaspar/> // Definitions: https://github.com/borisyankov/DefinitelyTyped /// <reference path="../node/node.d.ts" /> interface BrowserifyObject extends NodeJS.EventEmitter { add(file:string): BrowserifyObject; require(file:string, opts?:{ expose: string; }): BrowserifyObject; bundle(opts?:{ insertGlobals?: boolean; detectGlobals?: boolean; debug?: boolean; standalone?: string; insertGlobalVars?: any; }, cb?:(err:any, src:any) => void): NodeJS.ReadableStream; external(file:string): BrowserifyObject; ignore(file:string): BrowserifyObject; transform(tr:string): BrowserifyObject; transform(tr:Function): BrowserifyObject; plugin(plugin:string, opts?:any): BrowserifyObject; plugin(plugin:Function, opts?:any): BrowserifyObject; } interface Browserify { (): BrowserifyObject; (files:string[]): BrowserifyObject; (opts:{ entries?: string[]; noParse?: string[]; }): BrowserifyObject; } declare module "browserify" { var browserify: Browserify; export = browserify; }
johnsoft/hashwick
vendor/browserify/browserify.d.ts
TypeScript
mit
1,165
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System; using System.Reflection; using System.Diagnostics; using System.Runtime.CompilerServices; namespace ObjectStackAllocation { class SimpleClassA { public int f1; public int f2; public SimpleClassA(int f1, int f2) { this.f1 = f1; this.f2 = f2; } } sealed class SimpleClassB { public long f1; public long f2; public SimpleClassB(long f1, long f2) { this.f1 = f1; this.f2 = f2; } } sealed class SimpleClassWithGCField : SimpleClassA { public object o; public SimpleClassWithGCField(int f1, int f2, object o) : base(f1, f2) { this.o = o; } } class ClassWithNestedStruct { public ClassWithNestedStruct(int f1, int f2) { ns.f1 = f1; ns.f2 = f2; ns.s.f1 = f1; ns.s.f2 = f2; } public NestedStruct ns; } struct SimpleStruct { public int f1; public int f2; } struct NestedStruct { public int f1; public int f2; public SimpleStruct s; } enum AllocationKind { Heap, Stack, Undefined } class Tests { static volatile int f1 = 5; static volatile int f2 = 7; static SimpleClassA classA; static SimpleClassWithGCField classWithGCField; static string str0; static string str1; static string str2; static string str3; static string str4; delegate int Test(); static int methodResult = 100; public static int Main() { AllocationKind expectedAllocationKind = AllocationKind.Stack; if (GCStressEnabled()) { Console.WriteLine("GCStress is enabled"); expectedAllocationKind = AllocationKind.Undefined; } else if (!SPCOptimizationsEnabled()) { Console.WriteLine("System.Private.CoreLib.dll optimizations are disabled"); expectedAllocationKind = AllocationKind.Heap; } classA = new SimpleClassA(f1, f2); classWithGCField = new SimpleClassWithGCField(f1, f2, null); str0 = "str_zero"; str1 = "str_one"; str2 = "str_two"; str3 = "str_three"; str4 = "str_four"; 
CallTestAndVerifyAllocation(AllocateSimpleClassAndAddFields, 12, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateSimpleClassesAndEQCompareThem, 0, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateSimpleClassesAndNECompareThem, 1, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateSimpleClassAndGetField, 7, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateClassWithNestedStructAndGetField, 5, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateClassWithNestedStructAndAddFields, 24, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateSimpleClassWithGCFieldAndAddFields, 12, expectedAllocationKind); CallTestAndVerifyAllocation(AllocateSimpleClassAndAssignRefToAField, 12, expectedAllocationKind); CallTestAndVerifyAllocation(TestMixOfReportingAndWriteBarriers, 34, expectedAllocationKind); // The object is currently allocated on the stack when this method is jitted and on the heap when it's R2R-compiled. // The reason is that we always do the type check via helper in R2R mode, which blocks stack allocation. // We don't have to use a helper in this case (even for R2R), https://github.com/dotnet/coreclr/issues/22086 tracks fixing that. 
CallTestAndVerifyAllocation(AllocateSimpleClassAndCheckTypeNoHelper, 1, AllocationKind.Undefined); // The remaining tests currently never allocate on the stack if (expectedAllocationKind == AllocationKind.Stack) { expectedAllocationKind = AllocationKind.Heap; } // This test calls CORINFO_HELP_ISINSTANCEOFCLASS CallTestAndVerifyAllocation(AllocateSimpleClassAndCheckTypeHelper, 1, expectedAllocationKind); // This test calls CORINFO_HELP_CHKCASTCLASS_SPECIAL CallTestAndVerifyAllocation(AllocateSimpleClassAndCast, 7, expectedAllocationKind); // Stack allocation of boxed structs is currently disabled CallTestAndVerifyAllocation(BoxSimpleStructAndAddFields, 12, expectedAllocationKind); return methodResult; } static bool SPCOptimizationsEnabled() { Assembly objectAssembly = Assembly.GetAssembly(typeof(object)); object[] attribs = objectAssembly.GetCustomAttributes(typeof(DebuggableAttribute), false); DebuggableAttribute debuggableAttribute = attribs[0] as DebuggableAttribute; return ((debuggableAttribute == null) || !debuggableAttribute.IsJITOptimizerDisabled); } static bool GCStressEnabled() { return Environment.GetEnvironmentVariable("COMPlus_GCStress") != null; } static void CallTestAndVerifyAllocation(Test test, int expectedResult, AllocationKind expectedAllocationsKind) { // Run the test once to exclude any allocations during jitting, etc. 
//test(); long allocatedBytesBefore = GC.GetAllocatedBytesForCurrentThread(); int testResult = test(); long allocatedBytesAfter = GC.GetAllocatedBytesForCurrentThread(); string methodName = test.Method.Name; if (testResult != expectedResult) { Console.WriteLine($"FAILURE ({methodName}): expected {expectedResult}, got {testResult}"); methodResult = -1; } else if ((expectedAllocationsKind == AllocationKind.Stack) && (allocatedBytesBefore != allocatedBytesAfter)) { Console.WriteLine($"FAILURE ({methodName}): unexpected allocation of {allocatedBytesAfter - allocatedBytesBefore} bytes"); methodResult = -1; } else if ((expectedAllocationsKind == AllocationKind.Heap) && (allocatedBytesBefore == allocatedBytesAfter)) { Console.WriteLine($"FAILURE ({methodName}): unexpected stack allocation"); methodResult = -1; } else { Console.WriteLine($"SUCCESS ({methodName})"); } } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassAndAddFields() { SimpleClassA a = new SimpleClassA(f1, f2); GC.Collect(); return a.f1 + a.f2; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassesAndEQCompareThem() { SimpleClassA a1 = new SimpleClassA(f1, f2); SimpleClassA a2 = (f1 == 0) ? a1 : new SimpleClassA(f2, f1); GC.Collect(); return (a1 == a2) ? 1 : 0; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassesAndNECompareThem() { SimpleClassA a1 = new SimpleClassA(f1, f2); SimpleClassA a2 = (f1 == 0) ? a1 : new SimpleClassA(f2, f1); GC.Collect(); return (a1 != a2) ? 1 : 0; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassAndCheckTypeNoHelper() { object o = (f1 == 0) ? (object)new SimpleClassB(f1, f2) : (object)new SimpleClassA(f1, f2); GC.Collect(); return (o is SimpleClassB) ? 0 : 1; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassAndCheckTypeHelper() { object o = (f1 == 0) ? 
(object)new SimpleClassB(f1, f2) : (object)new SimpleClassA(f1, f2); GC.Collect(); return !(o is SimpleClassA) ? 0 : 1; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassAndCast() { object o = (f1 == 0) ? (object)new SimpleClassB(f1, f2) : (object)new SimpleClassA(f2, f1); GC.Collect(); return ((SimpleClassA)o).f1; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassAndGetField() { SimpleClassA a = new SimpleClassA(f1, f2); GC.Collect(); ref int f = ref a.f2; return f; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateClassWithNestedStructAndGetField() { ClassWithNestedStruct c = new ClassWithNestedStruct(f1, f2); GC.Collect(); ref int f = ref c.ns.s.f1; return f; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateClassWithNestedStructAndAddFields() { ClassWithNestedStruct c = new ClassWithNestedStruct(f1, f2); GC.Collect(); return c.ns.f1 + c.ns.f2 + c.ns.s.f1 + c.ns.s.f2; } [MethodImpl(MethodImplOptions.NoInlining)] static int AllocateSimpleClassWithGCFieldAndAddFields() { SimpleClassWithGCField c = new SimpleClassWithGCField(f1, f2, null); GC.Collect(); return c.f1 + c.f2; } static int AllocateSimpleClassAndAssignRefToAField() { SimpleClassWithGCField c = new SimpleClassWithGCField(f1, f2, null); GC.Collect(); c.o = classA; return c.f1 + c.f2; } [MethodImpl(MethodImplOptions.NoInlining)] static int BoxSimpleStructAndAddFields() { SimpleStruct str; str.f1 = f1; str.f2 = f2; object boxedSimpleStruct = (object)str; GC.Collect(); return ((SimpleStruct)boxedSimpleStruct).f1 + ((SimpleStruct)boxedSimpleStruct).f2; } [MethodImpl(MethodImplOptions.NoInlining)] static int TestMixOfReportingAndWriteBarriers() { // c1 doesn't escape and is allocated on the stack SimpleClassWithGCField c1 = new SimpleClassWithGCField(f1, f2, str0); // c2 always points to a heap-allocated object SimpleClassWithGCField c2 = classWithGCField; // c2 and c3 may point to a heap-allocated object or to a stack-allocated 
object SimpleClassWithGCField c3 = (f1 == 0) ? c1 : c2; SimpleClassWithGCField c4 = (f2 == 0) ? c2 : c1; // c1 doesn't have to be reported to GC (but can be conservatively reported as an interior pointer) // c1.o should be reported to GC as a normal pointer (but can be conservatively reported as an interior pointer) // c2 should be reported to GC as a normal pointer (but can be conservatively reported as an interior pointer) // c3 and c4 must be reported as interior pointers GC.Collect(); // This assignment doesn't need a write barrier but may conservatively use a checked barrier c1.o = str1; // This assignment should optimally use a normal write barrier but may conservatively use a checked barrier c2.o = str2; // These assignments require a checked write barrier c3.o = str3; c4.o = str4; return c1.o.ToString().Length + c2.o.ToString().Length + c3.o.ToString().Length + c4.o.ToString().Length; } } }
poizan42/coreclr
tests/src/JIT/opt/ObjectStackAllocation/ObjectStackAllocationTests.cs
C#
mit
12,040
/* * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __MSM_CLOCKS_CALIFORNIUM_H #define __MSM_CLOCKS_CALIFORNIUM_H /* RPM controlled clocks */ #define clk_xo 0xf13dfee3 #define clk_xo_a_clk 0xd939b99b #define clk_ce_clk 0xd8bc64e1 #define clk_ce_a_clk 0x4dfefd47 #define clk_pcnoc_clk 0xc1296d0f #define clk_pcnoc_a_clk 0x9bcffee4 #define clk_bimc_clk 0x4b80bf00 #define clk_bimc_a_clk 0x4b25668a #define clk_snoc_clk 0x2c341aa0 #define clk_snoc_a_clk 0x8fcef2af #define clk_ipa_clk 0xfa685cda #define clk_ipa_a_clk 0xeeec2919 #define clk_qpic_clk 0x3ce6f7bb #define clk_qpic_a_clk 0xd70ccb7c #define clk_qdss_clk 0x1492202a #define clk_qdss_a_clk 0xdd121669 #define clk_bimc_msmbus_clk 0xd212feea #define clk_bimc_msmbus_a_clk 0x71d1a499 #define clk_mcd_ce_clk 0x7ad13979 #define clk_pcnoc_keepalive_a_clk 0x9464f720 #define clk_pcnoc_msmbus_clk 0x2b53b688 #define clk_pcnoc_msmbus_a_clk 0x9753a54f #define clk_pcnoc_pm_clk 0x5e636b5d #define clk_pcnoc_sps_clk 0x23d3f584 #define clk_qcedev_ce_clk 0x2e7f9cee #define clk_qcrypto_ce_clk 0xd8cd060b #define clk_qseecom_ce_clk 0xea036e4b #define clk_scm_ce_clk 0xfd35bb87 #define clk_snoc_msmbus_clk 0xe6900bb6 #define clk_snoc_msmbus_a_clk 0x5d4683bd #define clk_cxo_dwc3_clk 0xf79c19f6 #define clk_cxo_lpm_clk 0x94adbf3d #define clk_cxo_otg_clk 0x4eec0bb9 #define clk_div_clk1 0xaa1157a6 #define clk_div_clk1_ao 0x6b943d68 #define clk_ln_bb_clk 0x3ab0b36d #define clk_ln_bb_a_clk 0xc7257ea8 #define clk_rf_clk1 0xaabeea5a #define clk_rf_clk1_ao 0x72a10cb8 #define 
clk_rf_clk1_pin 0x8f463562 #define clk_rf_clk1_pin_ao 0x62549ff6 #define clk_rf_clk2 0x24a30992 #define clk_rf_clk2_ao 0x944d8bbd #define clk_rf_clk2_pin 0xa7c5602a #define clk_rf_clk2_pin_ao 0x2d75eb4d #define clk_rf_clk3 0xb673936b #define clk_rf_clk3_ao 0x038bb968 #define clk_rf_clk3_pin 0x726f53f5 #define clk_rf_clk3_pin_ao 0x76f9240f /* APSS controlled clocks */ #define clk_gpll0 0x1ebe3bc4 #define clk_gpll0_ao 0xa1368304 #define clk_gpll0_out_msscc 0x7d794829 #define clk_apss_ahb_clk_src 0x36f8495f #define clk_usb30_master_clk_src 0xc6262f89 #define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e #define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa #define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79 #define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a #define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902 #define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f #define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68 #define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb #define clk_blsp1_uart1_apps_clk_src 0xf8146114 #define clk_blsp1_uart2_apps_clk_src 0xfc9c2f73 #define clk_blsp1_uart3_apps_clk_src 0x600497f2 #define clk_blsp1_uart4_apps_clk_src 0x56bff15c #define clk_gp1_clk_src 0xad85b97a #define clk_gp2_clk_src 0xfb1f0065 #define clk_gp3_clk_src 0x63b693d6 #define clk_pcie_aux_clk_src 0xebc50566 #define clk_pdm2_clk_src 0x31e494fd #define clk_sdcc1_apps_clk_src 0xd4975db2 #define clk_usb30_mock_utmi_clk_src 0xa024a976 #define clk_usb3_aux_clk_src 0xfde7ae09 #define clk_gcc_pcie_phy_reset 0x9bc3c959 #define clk_gcc_qusb2a_phy_reset 0x2a9dfa9f #define clk_gcc_usb3phy_phy_reset 0xb1a4f885 #define clk_gcc_usb3_phy_reset 0x03d559f1 #define clk_gpll0_out_main_cgc 0xb0298998 #define clk_gcc_blsp1_ahb_clk 0x8caa5b4f #define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9 #define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0 #define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220 #define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f #define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82 #define 
clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880 #define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f #define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f #define clk_gcc_blsp1_uart1_apps_clk 0xc7c62f90 #define clk_gcc_blsp1_uart2_apps_clk 0xf8a61c96 #define clk_gcc_blsp1_uart3_apps_clk 0xc3298bd7 #define clk_gcc_blsp1_uart4_apps_clk 0x26be16c0 #define clk_gcc_boot_rom_ahb_clk 0xde2adeb1 #define clk_gcc_dcc_clk 0xd1000c50 #define clk_gpll0_out_main_div2_cgc 0xc76ac7ae #define clk_gcc_gp1_clk 0x057f7b69 #define clk_gcc_gp2_clk 0x9bf83ffd #define clk_gcc_gp3_clk 0xec6539ee #define clk_gcc_mss_q6_bimc_axi_clk 0x67544d62 #define clk_gcc_pcie_axi_clk 0xb833d9e3 #define clk_gcc_pcie_axi_mstr_clk 0x54d09178 #define clk_gcc_pcie_cfg_ahb_clk 0xddc9a515 #define clk_gcc_pcie_pipe_clk 0x8be62558 #define clk_gcc_pcie_sleep_clk 0x8b8bfc3b #define clk_gcc_pdm2_clk 0x99d55711 #define clk_gcc_pdm_ahb_clk 0x365664f6 #define clk_gcc_prng_ahb_clk 0x397e7eaa #define clk_gcc_sdcc1_ahb_clk 0x691e0caa #define clk_gcc_sdcc1_apps_clk 0x9ad6fb96 #define clk_gcc_apss_tcu_clk 0xaf56a329 #define clk_gcc_pcie_axi_tbu_clk 0xab70f06e #define clk_gcc_pcie_ref_clk 0x63fca50a #define clk_gcc_usb_ss_ref_clk 0xb85dadfa #define clk_gcc_qusb_ref_clk 0x16e35a90 #define clk_gcc_smmu_cfg_clk 0x75eaefa5 #define clk_gcc_usb3_axi_tbu_clk 0x18779c6e #define clk_gcc_sys_noc_usb3_axi_clk 0x94d26800 #define clk_gcc_usb30_master_clk 0xb3b4e2cb #define clk_gcc_usb30_mock_utmi_clk 0xa800b65a #define clk_gcc_usb30_sleep_clk 0xd0b65c92 #define clk_gcc_usb3_aux_clk 0x555d16b2 #define clk_gcc_usb3_pipe_clk 0x26f8a97a #define clk_gcc_usb_phy_cfg_ahb_clk 0xccb7e26f #define clk_gcc_mss_cfg_ahb_clk 0x111cde81 /* a7pll */ #define clk_a7pll_clk 0x3dd5dd94 /* clock_debug controlled clocks */ #define clk_gcc_debug_mux 0x8121ac15 /* Audio External Clocks */ #define clk_audio_lpass_mclk 0x575ec22b #endif
ZeroInfinityXDA/HelixKernel_Nougat
include/dt-bindings/clock/msm-clocks-californium.h
C
gpl-2.0
5,857
/* * Copyright (C) 2016 BayLibre, SAS * Author: Neil Armstrong <[email protected]> * Copyright (C) 2015 Amlogic, Inc. All rights reserved. * Copyright (C) 2014 Endless Mobile * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Written by: * Jasper St. Pierre <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_rect.h> #include "meson_plane.h" #include "meson_vpp.h" #include "meson_viu.h" #include "meson_canvas.h" #include "meson_registers.h" struct meson_plane { struct drm_plane base; struct meson_drm *priv; }; #define to_meson_plane(x) container_of(x, struct meson_plane, base) static int meson_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state; struct drm_rect clip = { 0, }; if (!state->crtc) return 0; crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); clip.x2 = crtc_state->mode.hdisplay; clip.y2 = crtc_state->mode.vdisplay; return drm_plane_helper_check_state(state, &clip, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true); } /* Takes a fixed 16.16 number and 
converts it to integer. */ static inline int64_t fixed16_to_int(int64_t value) { return value >> 16; } static void meson_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct meson_plane *meson_plane = to_meson_plane(plane); struct drm_plane_state *state = plane->state; struct drm_framebuffer *fb = state->fb; struct meson_drm *priv = meson_plane->priv; struct drm_gem_cma_object *gem; struct drm_rect src = { .x1 = (state->src_x), .y1 = (state->src_y), .x2 = (state->src_x + state->src_w), .y2 = (state->src_y + state->src_h), }; struct drm_rect dest = { .x1 = state->crtc_x, .y1 = state->crtc_y, .x2 = state->crtc_x + state->crtc_w, .y2 = state->crtc_y + state->crtc_h, }; unsigned long flags; /* * Update Coordinates * Update Formats * Update Buffer * Enable Plane */ spin_lock_irqsave(&priv->drm->event_lock, flags); /* Enable OSD and BLK0, set max global alpha */ priv->viu.osd1_ctrl_stat = OSD_ENABLE | (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; /* Set up BLK0 to point to the right canvas */ priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) | OSD_ENDIANNESS_LE); /* On GXBB, Use the old non-HDR RGB2YUV converter */ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB; switch (fb->format->format) { case DRM_FORMAT_XRGB8888: /* For XRGB, replace the pixel's alpha by 0xFF */ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN, priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; case DRM_FORMAT_ARGB8888: /* For ARGB, use the pixel's alpha */ writel_bits_relaxed(OSD_REPLACE_EN, 0, priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; case DRM_FORMAT_RGB888: priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 | OSD_COLOR_MATRIX_24_RGB; break; case DRM_FORMAT_RGB565: priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 | 
OSD_COLOR_MATRIX_16_RGB565; break; }; if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { priv->viu.osd1_interlace = true; dest.y1 /= 2; dest.y2 /= 2; } else priv->viu.osd1_interlace = false; /* * The format of these registers is (x2 << 16 | x1), * where x2 is exclusive. * e.g. +30x1920 would be (1919 << 16) | 30 */ priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) | fixed16_to_int(src.x1); priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) | fixed16_to_int(src.y1); priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1; priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1; /* Update Canvas with buffer address */ gem = drm_fb_cma_get_gem_obj(fb, 0); meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, gem->paddr, fb->pitches[0], fb->height, MESON_CANVAS_WRAP_NONE, MESON_CANVAS_BLKMODE_LINEAR); spin_unlock_irqrestore(&priv->drm->event_lock, flags); } static void meson_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct meson_plane *meson_plane = to_meson_plane(plane); struct meson_drm *priv = meson_plane->priv; /* Disable OSD1 */ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0, priv->io_base + _REG(VPP_MISC)); } static const struct drm_plane_helper_funcs meson_plane_helper_funcs = { .atomic_check = meson_plane_atomic_check, .atomic_disable = meson_plane_atomic_disable, .atomic_update = meson_plane_atomic_update, }; static const struct drm_plane_funcs meson_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; static const uint32_t supported_drm_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888, DRM_FORMAT_RGB565, }; int meson_plane_create(struct meson_drm *priv) { struct meson_plane *meson_plane; struct 
drm_plane *plane; meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane), GFP_KERNEL); if (!meson_plane) return -ENOMEM; meson_plane->priv = priv; plane = &meson_plane->base; drm_universal_plane_init(priv->drm, plane, 0xFF, &meson_plane_funcs, supported_drm_formats, ARRAY_SIZE(supported_drm_formats), NULL, DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane"); drm_plane_helper_add(plane, &meson_plane_helper_funcs); priv->primary_plane = plane; return 0; }
animalcreek/linux
drivers/gpu/drm/meson/meson_plane.c
C
gpl-2.0
6,873
/*
 * Copyright 2002-2007 Sascha Weinreuter
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.intellij.lang.xpath.xslt.run;

import com.intellij.diagnostic.logging.AdditionalTabComponent;
import com.intellij.execution.CantRunException;
import com.intellij.execution.configurations.AdditionalTabComponentManager;
import com.intellij.execution.configurations.SimpleJavaParameters;
import com.intellij.execution.process.ProcessHandler;
import com.intellij.execution.process.ProcessListener;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.UserDataHolder;
import org.jetbrains.annotations.NotNull;

import java.util.ArrayList;
import java.util.List;

/**
 * Extension point base class that lets other plugins hook into the XSLT runner:
 * attach a process listener, contribute extra console tabs, and patch the JVM
 * parameters of the launched XSLT process.
 *
 * <p>Created by IntelliJ IDEA. User: sweinreuter, Date: 14.06.2007</p>
 */
public abstract class XsltRunnerExtension {
    /** The extension point this class is registered under ("XPathView.xsltRunnerExtension"). */
    public static final ExtensionPointName<XsltRunnerExtension> EXTENSION_POINT_NAME =
            ExtensionPointName.create("XPathView.xsltRunnerExtension");

    /**
     * Creates a listener to be attached to the XSLT runner process.
     *
     * @param project       the current project
     * @param extensionData holder for extension-private data shared across the run
     */
    public abstract ProcessListener createProcessListener(Project project, UserDataHolder extensionData);

    /**
     * Add additional tabs in XSLT runner's run console.
     *
     * @return whether tabs were added (NOTE(review): inferred from the name and
     *         boolean return; confirm against callers)
     */
    public abstract boolean createTabs(Project project, AdditionalTabComponentManager manager, AdditionalTabComponent outputConsole, ProcessHandler process);

    /**
     * Allows the extension to modify the JVM parameters used to launch the
     * XSLT process (e.g. classpath or VM options).
     *
     * @throws CantRunException if the run cannot proceed with the patched parameters
     */
    public abstract void patchParameters(SimpleJavaParameters parameters, XsltRunConfiguration xsltCommandLineState, UserDataHolder extensionData) throws CantRunException;

    /**
     * Whether this extension applies to the given run configuration.
     *
     * @param debugger true when the configuration is being run under the debugger
     */
    protected abstract boolean supports(XsltRunConfiguration config, boolean debugger);

    /**
     * Returns all registered extensions that {@link #supports support} the given
     * configuration/debugger combination.
     */
    @NotNull
    public static List<XsltRunnerExtension> getExtensions(XsltRunConfiguration config, boolean debugger) {
        final XsltRunnerExtension[] extensions = Extensions.getExtensions(EXTENSION_POINT_NAME);
        // Pre-size to the total count; the filtered list can only be smaller.
        final ArrayList<XsltRunnerExtension> list = new ArrayList<XsltRunnerExtension>(extensions.length);
        for (XsltRunnerExtension extension : extensions) {
            if (extension.supports(config, debugger)) {
                list.add(extension);
            }
        }
        return list;
    }
}
clumsy/intellij-community
plugins/xpath/xpath-lang/src/org/intellij/lang/xpath/xslt/run/XsltRunnerExtension.java
Java
apache-2.0
2,714
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package webhook implements the audit.Backend interface using HTTP webhooks. package webhook import ( "fmt" "time" "k8s.io/apimachinery/pkg/runtime/schema" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/apis/audit/install" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/rest" ) const ( // PluginName is the name of this plugin, to be used in help and logs. PluginName = "webhook" // DefaultInitialBackoff is the default amount of time to wait before // retrying sending audit events through a webhook. DefaultInitialBackoff = 10 * time.Second ) func init() { install.Install(audit.Scheme) } func loadWebhook(configFile string, groupVersion schema.GroupVersion, initialBackoff time.Duration) (*webhook.GenericWebhook, error) { return webhook.NewGenericWebhook(audit.Scheme, audit.Codecs, configFile, []schema.GroupVersion{groupVersion}, initialBackoff) } type backend struct { w *webhook.GenericWebhook name string } // NewDynamicBackend returns an audit backend configured from a REST client that // sends events over HTTP to an external service. func NewDynamicBackend(rc *rest.RESTClient, initialBackoff time.Duration) audit.Backend { return &backend{ w: &webhook.GenericWebhook{ RestClient: rc, InitialBackoff: initialBackoff, }, name: fmt.Sprintf("dynamic_%s", PluginName), } } // NewBackend returns an audit backend that sends events over HTTP to an external service. 
func NewBackend(kubeConfigFile string, groupVersion schema.GroupVersion, initialBackoff time.Duration) (audit.Backend, error) { w, err := loadWebhook(kubeConfigFile, groupVersion, initialBackoff) if err != nil { return nil, err } return &backend{w: w, name: PluginName}, nil } func (b *backend) Run(stopCh <-chan struct{}) error { return nil } func (b *backend) Shutdown() { // nothing to do here } func (b *backend) ProcessEvents(ev ...*auditinternal.Event) bool { if err := b.processEvents(ev...); err != nil { audit.HandlePluginError(b.String(), err, ev...) return false } return true } func (b *backend) processEvents(ev ...*auditinternal.Event) error { var list auditinternal.EventList for _, e := range ev { list.Items = append(list.Items, *e) } return b.w.WithExponentialBackoff(func() rest.Result { return b.w.RestClient.Post().Body(&list).Do() }).Error() } func (b *backend) String() string { return b.name }
Stackdriver/heapster
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go
GO
apache-2.0
3,002
var xor = require('buffer-xor') function incr32 (iv) { var len = iv.length var item while (len--) { item = iv.readUInt8(len) if (item === 255) { iv.writeUInt8(0, len) } else { item++ iv.writeUInt8(item, len) break } } } function getBlock (self) { var out = self._cipher.encryptBlockRaw(self._prev) incr32(self._prev) return out } var blockSize = 16 exports.encrypt = function (self, chunk) { var chunkNum = Math.ceil(chunk.length / blockSize) var start = self._cache.length self._cache = Buffer.concat([ self._cache, Buffer.allocUnsafe(chunkNum * blockSize) ]) for (var i = 0; i < chunkNum; i++) { var out = getBlock(self) var offset = start + i * blockSize self._cache.writeUInt32BE(out[0], offset + 0) self._cache.writeUInt32BE(out[1], offset + 4) self._cache.writeUInt32BE(out[2], offset + 8) self._cache.writeUInt32BE(out[3], offset + 12) } var pad = self._cache.slice(0, chunk.length) self._cache = self._cache.slice(chunk.length) return xor(chunk, pad) }
Lenny12/trafficSlotter
src/webservice/node_modules/browserify-aes/modes/ctr.js
JavaScript
mit
1,068
/**
 * @file
 * A Backbone View that provides keyboard interaction for a contextual link.
 */

(function (Drupal, Backbone) {
  Drupal.contextual.KeyboardView = Backbone.View.extend(/** @lends Drupal.contextual.KeyboardView# */{

    /**
     * @type {object}
     */
    events: {
      'focus .trigger': 'focus',
      'focus .contextual-links a': 'focus',
      'blur .trigger': function () {
        this.model.blur();
      },
      'blur .contextual-links a': function () {
        // Delay dismissal so a keyboard user can tab between the trigger and
        // the contextual links without the menu closing in between. The arrow
        // function keeps `this` bound to the view.
        this.timer = window.setTimeout(() => {
          this.model.close().blur();
        }, 150);
      },
    },

    /**
     * Provides keyboard interaction for a contextual link.
     *
     * @constructs
     *
     * @augments Backbone.View
     */
    initialize() {
      /**
       * Handle of the timeout that dismisses the contextual links on blur.
       * The delay is only needed while keyboard users can tab into contextual
       * links outside edit mode (i.e. without TabbingManager); if that tabbing
       * is ever disabled, this timer logic can be removed.
       *
       * @type {NaN|number}
       */
      this.timer = NaN;
    },

    /**
     * Sets focus on the model; clears the timer that dismisses the links.
     */
    focus() {
      // Focus moved back in before the blur timeout fired; cancel dismissal.
      window.clearTimeout(this.timer);
      this.model.focus();
    },
  });
}(Drupal, Backbone));
leorawe/sci-base
web/core/modules/contextual/js/views/KeyboardView.es6.js
JavaScript
mit
1,655
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

using System.Globalization;

namespace System.IO
{
    /// <summary>
    /// Provides a string parser that may be used instead of String.Split
    /// to avoid unnecessary string and array allocations.
    /// </summary>
    internal struct StringParser
    {
        /// <summary>The string being parsed.</summary>
        private readonly string _buffer;

        /// <summary>The separator character used to separate subcomponents of the larger string.</summary>
        private readonly char _separator;

        /// <summary>true if empty subcomponents should be skipped; false to treat them as valid entries.</summary>
        private readonly bool _skipEmpty;

        /// <summary>The starting index from which to parse the current entry.</summary>
        private int _startIndex;

        /// <summary>The ending index that represents the next index after the last character that's part of the current entry.</summary>
        private int _endIndex;

        /// <summary>Initialize the StringParser.</summary>
        /// <param name="buffer">The string to parse.</param>
        /// <param name="separator">The separator character used to separate subcomponents of <paramref name="buffer"/>.</param>
        /// <param name="skipEmpty">true if empty subcomponents should be skipped; false to treat them as valid entries. Defaults to false.</param>
        public StringParser(string buffer, char separator, bool skipEmpty = false)
        {
            if (buffer == null)
            {
                throw new ArgumentNullException("buffer");
            }
            _buffer = buffer;
            _separator = separator;
            _skipEmpty = skipEmpty;
            // -1/-1 marks "before the first component"; the first MoveNext
            // starts scanning at index 0 (_endIndex + 1).
            _startIndex = -1;
            _endIndex = -1;
        }

        /// <summary>Moves to the next component of the string.</summary>
        /// <returns>true if there is a next component to be parsed; otherwise, false.</returns>
        public bool MoveNext()
        {
            if (_buffer == null)
            {
                throw new InvalidOperationException();
            }

            while (true)
            {
                if (_endIndex >= _buffer.Length)
                {
                    _startIndex = _endIndex;
                    return false;
                }

                int nextSeparator = _buffer.IndexOf(_separator, _endIndex + 1);
                _startIndex = _endIndex + 1;
                _endIndex = nextSeparator >= 0 ? nextSeparator : _buffer.Length;

                // BUGFIX: accept any non-empty component (length >= 1).  The
                // previous condition used '>' (_endIndex > _startIndex + 1),
                // which required a length of at least 2 and therefore wrongly
                // skipped single-character components when skipEmpty was set.
                if (!_skipEmpty || _endIndex >= _startIndex + 1)
                {
                    return true;
                }
            }
        }

        /// <summary>
        /// Moves to the next component of the string. If there isn't one, it throws an exception.
        /// </summary>
        public void MoveNextOrFail()
        {
            if (!MoveNext())
            {
                ThrowForInvalidData();
            }
        }

        /// <summary>
        /// Moves to the next component of the string and returns it as a string.
        /// </summary>
        /// <returns>The next component.</returns>
        public string MoveAndExtractNext()
        {
            MoveNextOrFail();
            return _buffer.Substring(_startIndex, _endIndex - _startIndex);
        }

        /// <summary>
        /// Gets the current subcomponent of the string as a string.
        /// </summary>
        public string ExtractCurrent()
        {
            if (_buffer == null || _startIndex == -1)
            {
                throw new InvalidOperationException();
            }
            return _buffer.Substring(_startIndex, _endIndex - _startIndex);
        }

        /// <summary>Moves to the next component and parses it as an Int32.</summary>
        public int ParseNextInt32()
        {
            int result;
            if (!int.TryParse(MoveAndExtractNext(), NumberStyles.Integer, CultureInfo.InvariantCulture, out result))
            {
                ThrowForInvalidData();
            }
            return result;
        }

        /// <summary>Moves to the next component and parses it as an Int64.</summary>
        public long ParseNextInt64()
        {
            long result;
            if (!long.TryParse(MoveAndExtractNext(), NumberStyles.Integer, CultureInfo.InvariantCulture, out result))
            {
                ThrowForInvalidData();
            }
            return result;
        }

        /// <summary>Moves to the next component and parses it as a UInt32.</summary>
        public uint ParseNextUInt32()
        {
            uint result;
            if (!uint.TryParse(MoveAndExtractNext(), NumberStyles.Integer, CultureInfo.InvariantCulture, out result))
            {
                ThrowForInvalidData();
            }
            return result;
        }

        /// <summary>Moves to the next component and parses it as a UInt64.</summary>
        public ulong ParseNextUInt64()
        {
            ulong result;
            if (!ulong.TryParse(MoveAndExtractNext(), NumberStyles.Integer, CultureInfo.InvariantCulture, out result))
            {
                ThrowForInvalidData();
            }
            return result;
        }

        /// <summary>Moves to the next component and parses it as a Char.</summary>
        public char ParseNextChar()
        {
            char result;
            if (!char.TryParse(MoveAndExtractNext(), out result))
            {
                ThrowForInvalidData();
            }
            return result;
        }

        internal delegate T ParseRawFunc<T>(string buffer, ref int startIndex, ref int endIndex);

        /// <summary>
        /// Moves to the next component and hands the raw buffer and indexing data to a selector function
        /// that can validate and return the appropriate data from the component.
        /// </summary>
        internal T ParseRaw<T>(ParseRawFunc<T> selector)
        {
            MoveNextOrFail();
            return selector(_buffer, ref _startIndex, ref _endIndex);
        }

        /// <summary>Throws unconditionally for invalid data.</summary>
        private static void ThrowForInvalidData()
        {
            throw new InvalidDataException();
        }
    }
}
vs-team/corefx
src/Common/src/System/IO/StringParser.cs
C#
mit
6,366
/**************************************************************************** Copyright (c) 2012-2013 cocos2d-x.org http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
****************************************************************************/ #include "ProtocolAds.h" #include "PluginJniHelper.h" #include <android/log.h> #include "PluginUtils.h" #include "PluginJavaData.h" namespace cocos2d { namespace plugin { extern "C" { JNIEXPORT void JNICALL Java_org_cocos2dx_plugin_AdsWrapper_nativeOnAdsResult(JNIEnv* env, jobject thiz, jstring className, jint ret, jstring msg) { std::string strMsg = PluginJniHelper::jstring2string(msg); std::string strClassName = PluginJniHelper::jstring2string(className); PluginProtocol* pPlugin = PluginUtils::getPluginPtr(strClassName); PluginUtils::outputLog("ProtocolAds", "nativeOnAdsResult(), Get plugin ptr : %p", pPlugin); if (pPlugin != NULL) { PluginUtils::outputLog("ProtocolAds", "nativeOnAdsResult(), Get plugin name : %s", pPlugin->getPluginName()); ProtocolAds* pAds = dynamic_cast<ProtocolAds*>(pPlugin); if (pAds != NULL) { AdsListener* listener = pAds->getAdsListener(); if (listener) { listener->onAdsResult((AdsResultCode) ret, strMsg.c_str()); } else { ProtocolAds::ProtocolAdsCallback callback = pAds->getCallback(); if(callback) { callback(ret, strMsg); } } } } } JNIEXPORT void JNICALL Java_org_cocos2dx_plugin_AdsWrapper_nativeOnPlayerGetPoints(JNIEnv* env, jobject thiz, jstring className, jint points) { std::string strClassName = PluginJniHelper::jstring2string(className); PluginProtocol* pPlugin = PluginUtils::getPluginPtr(strClassName); PluginUtils::outputLog("ProtocolAds", "nativeOnPlayerGetPoints(), Get plugin ptr : %p", pPlugin); if (pPlugin != NULL) { PluginUtils::outputLog("ProtocolAds", "nativeOnPlayerGetPoints(), Get plugin name : %s", pPlugin->getPluginName()); ProtocolAds* pAds = dynamic_cast<ProtocolAds*>(pPlugin); if (pAds != NULL) { AdsListener* listener = pAds->getAdsListener(); if (listener) { listener->onPlayerGetPoints(pAds, points); } } } } } ProtocolAds::ProtocolAds() : _listener(NULL) { } ProtocolAds::~ProtocolAds() { } void 
ProtocolAds::configDeveloperInfo(TAdsDeveloperInfo devInfo) { if (devInfo.empty()) { PluginUtils::outputLog("ProtocolAds", "The application info is empty!"); return; } else { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "configDeveloperInfo" , "(Ljava/util/Hashtable;)V")) { // generate the hashtable from map jobject obj_Map = PluginUtils::createJavaMapObject(&devInfo); // invoke java method t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } } void ProtocolAds::showAds(TAdsInfo info, AdsPos pos) { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; PluginUtils::outputLog("ProtocolAds", "Class name : %s", pData->jclassName.c_str()); if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "showAds" , "(Ljava/util/Hashtable;I)V")) { jobject obj_Map = PluginUtils::createJavaMapObject(&info); t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map, pos); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } void ProtocolAds::hideAds(TAdsInfo info) { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; PluginUtils::outputLog("ProtocolAds", "Class name : %s", pData->jclassName.c_str()); if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "hideAds" , "(Ljava/util/Hashtable;)V")) { jobject obj_Map = PluginUtils::createJavaMapObject(&info); t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } void ProtocolAds::queryPoints() { PluginUtils::callJavaFunctionWithName(this, "queryPoints"); } void ProtocolAds::spendPoints(int points) { PluginUtils::callJavaFunctionWithName_oneParam(this, "spendPoints", "(I)V", points); } }} // namespace cocos2d { namespace plugin {
dios-game/dios-cocos
src/oslibs/cocos/cocos-src/plugin/protocols/platform/android/ProtocolAds.cpp
C++
mit
5,570
<!DOCTYPE html> <html lang="en"> <head> <title>three.js webgl - sea3d / skin</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0"> <style> body { font-family: Monospace; background-color: #000; margin: 0px; overflow: hidden; } #info { color: #fff; position: absolute; top: 10px; width: 100%; text-align: center; z-index: 100; display:block; } a { color: white } </style> </head> <body> <div id="info"> <a href="http://threejs.org" target="_blank">three.js</a> - model by <a href="https://github.com/sunag/sea3d" style="color:#FFFFF" target="_blank">sea3d</a> <br/>BoneObject: Object3D attached in a Bone <br/>Click to hidden/show the hat - Right click to run </div> <script src="../build/three.min.js"></script> <script src="js/controls/OrbitControls.js"></script> <script src="js/postprocessing/EffectComposer.js"></script> <script src="js/postprocessing/RenderPass.js"></script> <script src="js/postprocessing/ShaderPass.js"></script> <script src="js/postprocessing/MaskPass.js"></script> <script src="js/shaders/CopyShader.js"></script> <script src="js/shaders/ColorCorrectionShader.js"></script> <script src="js/shaders/VignetteShader.js"></script> <script src="js/loaders/sea3d/SEA3D.js"></script> <script src="js/loaders/sea3d/SEA3DLZMA.js"></script> <script src="js/loaders/sea3d/SEA3DLoader.js"></script> <script src="js/Detector.js"></script> <script src="js/libs/stats.min.js"></script> <script> if ( ! Detector.webgl ) Detector.addGetWebGLMessage(); var container, stats; var camera, scene, renderer, composer, controls, player, hat; var loader; // Initialize Three.JS init(); // // SEA3D Loader // loader = new THREE.SEA3D( { autoPlay : true, // Auto play animations container : scene, // Container to add models multiplier : .6 // Light multiplier } ); loader.onComplete = function( e ) { // Get the first camera from SEA3D Studio // use loader.get... 
to get others objects var cam = loader.cameras[0]; camera.position.copy( cam.position ); camera.rotation.copy( cam.rotation ); controls = new THREE.OrbitControls( camera ); // get meshes player = loader.getMesh("Player"); hat = loader.getMesh("Hat"); // events window.addEventListener( 'click', onMouseClick, false ); window.addEventListener( 'contextmenu', onRightClick, false ); animate(); }; loader.load( './models/sea3d/skin.tjs.sea' ); // function init() { scene = new THREE.Scene(); container = document.createElement( 'div' ); document.body.appendChild( container ); camera = new THREE.PerspectiveCamera( 45, window.innerWidth / window.innerHeight, 1, 2000 ); camera.position.set( 1000, - 300, 1000 ); renderer = new THREE.WebGLRenderer(); renderer.setPixelRatio( window.devicePixelRatio ); renderer.setSize( window.innerWidth, window.innerHeight ); renderer.setClearColor( 0x333333, 1 ); container.appendChild( renderer.domElement ); stats = new Stats(); stats.domElement.style.position = 'absolute'; stats.domElement.style.top = '0px'; container.appendChild( stats.domElement ); // post-processing composer = new THREE.EffectComposer( renderer ); var renderPass = new THREE.RenderPass( scene, camera ); var copyPass = new THREE.ShaderPass( THREE.CopyShader ); composer.addPass( renderPass ); var vh = 1.4, vl = 1.2; var colorCorrectionPass = new THREE.ShaderPass( THREE.ColorCorrectionShader ); colorCorrectionPass.uniforms[ "powRGB" ].value = new THREE.Vector3( vh, vh, vh ); colorCorrectionPass.uniforms[ "mulRGB" ].value = new THREE.Vector3( vl, vl, vl ); composer.addPass( colorCorrectionPass ); var vignettePass = new THREE.ShaderPass( THREE.VignetteShader ); vignettePass.uniforms[ "darkness" ].value = 1.0; composer.addPass( vignettePass ); composer.addPass( copyPass ); copyPass.renderToScreen = true; // extra lights scene.add( new THREE.AmbientLight( 0x333333 ) ); // events window.addEventListener( 'resize', onWindowResize, false ); } function onRightClick( e ) { // play 
character animation if (player.currentAnimation.name == "idle") player.play("run", .5); else player.play("idle", .5); e.preventDefault(); } function onMouseClick( e ) { if (e.button != 0) return; var hat = loader.getMesh("Hat"); hat.visible = !hat.visible; } function onWindowResize() { camera.aspect = window.innerWidth / window.innerHeight; camera.updateProjectionMatrix(); renderer.setSize( window.innerWidth, window.innerHeight ); } // var clock = new THREE.Clock(); function animate() { var delta = clock.getDelta(); requestAnimationFrame( animate ); // Update SEA3D Animations THREE.SEA3D.AnimationHandler.update( delta ); // Update Three.JS Animations THREE.AnimationHandler.update( delta ); render( delta ); stats.update(); } function render( dlt ) { //renderer.render( scene, camera ); composer.render( dlt ); } </script> </body> </html>
nilo916/3d_test
test/js/doob/examples/webgl_loader_sea3d_skinning.html
HTML
mit
5,356
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: atomic_host short_description: Manage the atomic host platform description: - Manage the atomic host platform. - Rebooting of Atomic host platform should be done outside this module. version_added: "2.2" author: - Saravanan KR (@krsacme) notes: - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). requirements: - atomic - python >= 2.6 options: revision: description: - The version number of the atomic host to be deployed. Providing C(latest) will upgrade to the latest available version. default: latest aliases: [ version ] ''' EXAMPLES = ''' - name: Upgrade the atomic host platform to the latest version (atomic host upgrade) atomic_host: revision: latest - name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) atomic_host: revision: 23.130 ''' RETURN = ''' msg: description: The command standard output returned: always type: str sample: 'Already on latest' ''' import os import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native def core(module): revision = module.params['revision'] args = [] module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') if revision == 'latest': args = ['atomic', 'host', 'upgrade'] else: args = ['atomic', 'host', 'deploy', revision] out = {} err = {} rc = 0 rc, out, err = module.run_command(args, check_rc=False) if rc == 77 and revision == 'latest': module.exit_json(msg="Already on latest", changed=False) elif rc != 0: module.fail_json(rc=rc, msg=err) else: module.exit_json(msg=out, changed=True) def main(): module = 
AnsibleModule( argument_spec=dict( revision=dict(type='str', default='latest', aliases=["version"]), ), ) # Verify that the platform is atomic host if not os.path.exists("/run/ostree-booted"): module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only") try: core(module) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
alxgu/ansible
lib/ansible/modules/cloud/atomic/atomic_host.py
Python
gpl-3.0
2,728
/*
 * Copyright (c) 2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.wso2.carbon.claim.mgt.dto;

/**
 * Transfer object that pairs a claim with the attribute name(s) it is
 * mapped to in the underlying user store.
 */
public class ClaimMappingDTO {

    private ClaimDTO claim;
    private String mappedAttribute;
    private ClaimAttributeDTO[] mappedAttributes;

    public ClaimDTO getClaim() {
        return claim;
    }

    public void setClaim(ClaimDTO claim) {
        this.claim = claim;
    }

    public String getMappedAttribute() {
        return mappedAttribute;
    }

    public void setMappedAttribute(String mappedAttribute) {
        this.mappedAttribute = mappedAttribute;
    }

    /**
     * Returns a defensive copy of the mapped attributes.
     * Never returns null; an unset value yields an empty array.
     */
    public ClaimAttributeDTO[] getMappedAttributes() {
        return (mappedAttributes == null) ? new ClaimAttributeDTO[0] : mappedAttributes.clone();
    }

    /**
     * Stores a defensive copy of the given array.
     * A null argument is ignored and the existing value is kept.
     */
    public void setMappedAttributes(ClaimAttributeDTO[] mappedAttributes) {
        if (mappedAttributes != null) {
            this.mappedAttributes = mappedAttributes.clone();
        }
    }
}
pulasthi7/carbon-identity
components/claim-mgt/org.wso2.carbon.claim.mgt/src/main/java/org/wso2/carbon/claim/mgt/dto/ClaimMappingDTO.java
Java
apache-2.0
1,611
<?php # # SmartyPants - Smart punctuation for web sites # # by John Gruber # <http://daringfireball.net> # # PHP port by Michel Fortin # <http://www.michelf.com/> # # Copyright (c) 2003-2004 John Gruber # Copyright (c) 2004-2005 Michel Fortin # global $SmartyPantsPHPVersion, $SmartyPantsSyntaxVersion, $smartypants_attr, $sp_tags_to_skip; $SmartyPantsPHPVersion = '1.5.1e'; # Fru 9 Dec 2005 $SmartyPantsSyntaxVersion = '1.5.1'; # Fri 12 Mar 2004 # Configurable variables: $smartypants_attr = "1"; # Change this to configure. # 1 => "--" for em-dashes; no en-dash support # 2 => "---" for em-dashes; "--" for en-dashes # 3 => "--" for em-dashes; "---" for en-dashes # See docs for more configuration options. # Globals: $sp_tags_to_skip = '<(/?)(?:pre|code|kbd|script|math)[\s>]'; # -- WordPress plugin interface ----------------------------------------------- /* Plugin Name: SmartyPants Plugin URI: http://www.michelf.com/projects/php-smartypants/ Description: SmartyPants is a web publishing utility that translates plain ASCII punctuation characters into &#8220;smart&#8221; typographic punctuation HTML entities. This plugin <strong>replace the default WordPress Texturize algorithm</strong> for the content and the title of your posts, the comments body and author name, and everywhere else Texturize normally apply. Based on the original Perl version by <a href="http://daringfireball.net/">John Gruber</a>. Version: 1.5.1e Author: Michel Fortin Author URI: http://www.michelf.com/ */ if (isset($wp_version)) { # Remove default Texturize filter that would conflict with SmartyPants. 
remove_filter('category_description', 'wptexturize'); remove_filter('list_cats', 'wptexturize'); remove_filter('comment_author', 'wptexturize'); remove_filter('comment_text', 'wptexturize'); remove_filter('single_post_title', 'wptexturize'); remove_filter('the_title', 'wptexturize'); remove_filter('the_content', 'wptexturize'); remove_filter('the_excerpt', 'wptexturize'); # Add SmartyPants filter with priority 10 (same as Texturize). add_filter('category_description', 'SmartyPants', 10); add_filter('list_cats', 'SmartyPants', 10); add_filter('comment_author', 'SmartyPants', 10); add_filter('comment_text', 'SmartyPants', 10); add_filter('single_post_title', 'SmartyPants', 10); add_filter('the_title', 'SmartyPants', 10); add_filter('the_content', 'SmartyPants', 10); add_filter('the_excerpt', 'SmartyPants', 10); } # -- Smarty Modifier Interface ------------------------------------------------ function smarty_modifier_smartypants($text, $attr = NULL) { return SmartyPants($text, $attr); } function SmartyPants($text, $attr = NULL, $ctx = NULL) { global $smartypants_attr, $sp_tags_to_skip; # Paramaters: $text; # text to be parsed $attr; # value of the smart_quotes="" attribute $ctx; # MT context object (unused) if ($attr == NULL) $attr = $smartypants_attr; # Options to specify which transformations to make: $do_stupefy = FALSE; $convert_quot = 0; # should we translate &quot; entities into normal quotes? # Parse attributes: # 0 : do nothing # 1 : set all # 2 : set all, using old school en- and em- dash shortcuts # 3 : set all, using inverted old school en and em- dash shortcuts # # q : quotes # b : backtick quotes (``double'' only) # B : backtick quotes (``double'' and `single') # d : dashes # D : old school dashes # i : inverted old school dashes # e : ellipses # w : convert &quot; entities to " for Dreamweaver users if ($attr == "0") { # Do nothing. return $text; } else if ($attr == "1") { # Do everything, turn all options on. 
$do_quotes = 1; $do_backticks = 1; $do_dashes = 1; $do_ellipses = 1; } else if ($attr == "2") { # Do everything, turn all options on, use old school dash shorthand. $do_quotes = 1; $do_backticks = 1; $do_dashes = 2; $do_ellipses = 1; } else if ($attr == "3") { # Do everything, turn all options on, use inverted old school dash shorthand. $do_quotes = 1; $do_backticks = 1; $do_dashes = 3; $do_ellipses = 1; } else if ($attr == "-1") { # Special "stupefy" mode. $do_stupefy = 1; } else { $chars = preg_split('//', $attr); foreach ($chars as $c){ if ($c == "q") { $do_quotes = 1; } else if ($c == "b") { $do_backticks = 1; } else if ($c == "B") { $do_backticks = 2; } else if ($c == "d") { $do_dashes = 1; } else if ($c == "D") { $do_dashes = 2; } else if ($c == "i") { $do_dashes = 3; } else if ($c == "e") { $do_ellipses = 1; } else if ($c == "w") { $convert_quot = 1; } else { # Unknown attribute option, ignore. } } } $tokens = _TokenizeHTML($text); $result = ''; $in_pre = 0; # Keep track of when we're inside <pre> or <code> tags. $prev_token_last_char = ""; # This is a cheat, used to get some context # for one-character tokens that consist of # just a quote char. What we do is remember # the last character of the previous text # token, to use as context to curl single- # character quote tokens correctly. foreach ($tokens as $cur_token) { if ($cur_token[0] == "tag") { # Don't mess with quotes inside tags. $result .= $cur_token[1]; if (preg_match("@$sp_tags_to_skip@", $cur_token[1], $matches)) { $in_pre = isset($matches[1]) && $matches[1] == '/' ? 0 : 1; } } else { $t = $cur_token[1]; $last_char = substr($t, -1); # Remember last char of this token before processing. if (! 
$in_pre) { $t = ProcessEscapes($t); if ($convert_quot) { $t = preg_replace('/&quot;/', '"', $t); } if ($do_dashes) { if ($do_dashes == 1) $t = EducateDashes($t); if ($do_dashes == 2) $t = EducateDashesOldSchool($t); if ($do_dashes == 3) $t = EducateDashesOldSchoolInverted($t); } if ($do_ellipses) $t = EducateEllipses($t); # Note: backticks need to be processed before quotes. if ($do_backticks) { $t = EducateBackticks($t); if ($do_backticks == 2) $t = EducateSingleBackticks($t); } if ($do_quotes) { if ($t == "'") { # Special case: single-character ' token if (preg_match('/\S/', $prev_token_last_char)) { $t = "&#8217;"; } else { $t = "&#8216;"; } } else if ($t == '"') { # Special case: single-character " token if (preg_match('/\S/', $prev_token_last_char)) { $t = "&#8221;"; } else { $t = "&#8220;"; } } else { # Normal case: $t = EducateQuotes($t); } } if ($do_stupefy) $t = StupefyEntities($t); } $prev_token_last_char = $last_char; $result .= $t; } } return $result; } function SmartQuotes($text, $attr = NULL, $ctx = NULL) { global $smartypants_attr, $sp_tags_to_skip; # Paramaters: $text; # text to be parsed $attr; # value of the smart_quotes="" attribute $ctx; # MT context object (unused) if ($attr == NULL) $attr = $smartypants_attr; $do_backticks; # should we educate ``backticks'' -style quotes? if ($attr == 0) { # do nothing; return $text; } else if ($attr == 2) { # smarten ``backticks'' -style quotes $do_backticks = 1; } else { $do_backticks = 0; } # Special case to handle quotes at the very end of $text when preceded by # an HTML tag. Add a space to give the quote education algorithm a bit of # context, so that it can guess correctly that it's a closing quote: $add_extra_space = 0; if (preg_match("/>['\"]\\z/", $text)) { $add_extra_space = 1; # Remember, so we can trim the extra space later. 
$text .= " "; } $tokens = _TokenizeHTML($text); $result = ''; $in_pre = 0; # Keep track of when we're inside <pre> or <code> tags $prev_token_last_char = ""; # This is a cheat, used to get some context # for one-character tokens that consist of # just a quote char. What we do is remember # the last character of the previous text # token, to use as context to curl single- # character quote tokens correctly. foreach ($tokens as $cur_token) { if ($cur_token[0] == "tag") { # Don't mess with quotes inside tags $result .= $cur_token[1]; if (preg_match("@$sp_tags_to_skip@", $cur_token[1], $matches)) { $in_pre = isset($matches[1]) && $matches[1] == '/' ? 0 : 1; } } else { $t = $cur_token[1]; $last_char = substr($t, -1); # Remember last char of this token before processing. if (! $in_pre) { $t = ProcessEscapes($t); if ($do_backticks) { $t = EducateBackticks($t); } if ($t == "'") { # Special case: single-character ' token if (preg_match('/\S/', $prev_token_last_char)) { $t = "&#8217;"; } else { $t = "&#8216;"; } } else if ($t == '"') { # Special case: single-character " token if (preg_match('/\S/', $prev_token_last_char)) { $t = "&#8221;"; } else { $t = "&#8220;"; } } else { # Normal case: $t = EducateQuotes($t); } } $prev_token_last_char = $last_char; $result .= $t; } } if ($add_extra_space) { preg_replace('/ \z/', '', $result); # Trim trailing space if we added one earlier. 
} return $result; } function SmartDashes($text, $attr = NULL, $ctx = NULL) { global $smartypants_attr, $sp_tags_to_skip; # Paramaters: $text; # text to be parsed $attr; # value of the smart_dashes="" attribute $ctx; # MT context object (unused) if ($attr == NULL) $attr = $smartypants_attr; # reference to the subroutine to use for dash education, default to EducateDashes: $dash_sub_ref = 'EducateDashes'; if ($attr == 0) { # do nothing; return $text; } else if ($attr == 2) { # use old smart dash shortcuts, "--" for en, "---" for em $dash_sub_ref = 'EducateDashesOldSchool'; } else if ($attr == 3) { # inverse of 2, "--" for em, "---" for en $dash_sub_ref = 'EducateDashesOldSchoolInverted'; } $tokens; $tokens = _TokenizeHTML($text); $result = ''; $in_pre = 0; # Keep track of when we're inside <pre> or <code> tags foreach ($tokens as $cur_token) { if ($cur_token[0] == "tag") { # Don't mess with quotes inside tags $result .= $cur_token[1]; if (preg_match("@$sp_tags_to_skip@", $cur_token[1], $matches)) { $in_pre = isset($matches[1]) && $matches[1] == '/' ? 0 : 1; } } else { $t = $cur_token[1]; if (! $in_pre) { $t = ProcessEscapes($t); $t = $dash_sub_ref($t); } $result .= $t; } } return $result; } function SmartEllipses($text, $attr = NULL, $ctx = NULL) { # Paramaters: $text; # text to be parsed $attr; # value of the smart_ellipses="" attribute $ctx; # MT context object (unused) if ($attr == NULL) $attr = $smartypants_attr; if ($attr == 0) { # do nothing; return $text; } $tokens; $tokens = _TokenizeHTML($text); $result = ''; $in_pre = 0; # Keep track of when we're inside <pre> or <code> tags foreach ($tokens as $cur_token) { if ($cur_token[0] == "tag") { # Don't mess with quotes inside tags $result .= $cur_token[1]; if (preg_match("@$sp_tags_to_skip@", $cur_token[1], $matches)) { $in_pre = isset($matches[1]) && $matches[1] == '/' ? 0 : 1; } } else { $t = $cur_token[1]; if (! 
$in_pre) { $t = ProcessEscapes($t); $t = EducateEllipses($t); } $result .= $t; } } return $result; } function EducateQuotes($_) { # # Parameter: String. # # Returns: The string, with "educated" curly quote HTML entities. # # Example input: "Isn't this fun?" # Example output: &#8220;Isn&#8217;t this fun?&#8221; # # Make our own "punctuation" character class, because the POSIX-style # [:PUNCT:] is only available in Perl 5.6 or later: $punct_class = "[!\"#\\$\\%'()*+,-.\\/:;<=>?\\@\\[\\\\\]\\^_`{|}~]"; # Special case if the very first character is a quote # followed by punctuation at a non-word-break. Close the quotes by brute force: $_ = preg_replace( array("/^'(?=$punct_class\\B)/", "/^\"(?=$punct_class\\B)/"), array('&#8217;', '&#8221;'), $_); # Special case for double sets of quotes, e.g.: # <p>He said, "'Quoted' words in a larger quote."</p> $_ = preg_replace( array("/\"'(?=\w)/", "/'\"(?=\w)/"), array('&#8220;&#8216;', '&#8216;&#8220;'), $_); # Special case for decade abbreviations (the '80s): $_ = preg_replace("/'(?=\\d{2}s)/", '&#8217;', $_); $close_class = '[^\ \t\r\n\[\{\(\-]'; $dec_dashes = '&\#8211;|&\#8212;'; # Get most opening single quotes: $_ = preg_replace("{ ( \\s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities $dec_dashes | # or decimal entities &\\#x201[34]; # or hex ) ' # the quote (?=\\w) # followed by a word character }x", '\1&#8216;', $_); # Single closing quotes: $_ = preg_replace("{ ($close_class)? ' (?(1)| # If $1 captured, then do nothing; (?=\\s | s\\b) # otherwise, positive lookahead for a whitespace ) # char or an 's' at a word ending position. 
This # is a special case to handle something like: # \"<i>Custer</i>'s Last Stand.\" }xi", '\1&#8217;', $_); # Any remaining single quotes should be opening ones: $_ = str_replace("'", '&#8216;', $_); # Get most opening double quotes: $_ = preg_replace("{ ( \\s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities $dec_dashes | # or decimal entities &\\#x201[34]; # or hex ) \" # the quote (?=\\w) # followed by a word character }x", '\1&#8220;', $_); # Double closing quotes: $_ = preg_replace("{ ($close_class)? \" (?(1)|(?=\\s)) # If $1 captured, then do nothing; # if not, then make sure the next char is whitespace. }x", '\1&#8221;', $_); # Any remaining quotes should be opening ones. $_ = str_replace('"', '&#8220;', $_); return $_; } function EducateBackticks($_) { # # Parameter: String. # Returns: The string, with ``backticks'' -style double quotes # translated into HTML curly quote entities. # # Example input: ``Isn't this fun?'' # Example output: &#8220;Isn't this fun?&#8221; # $_ = str_replace(array("``", "''",), array('&#8220;', '&#8221;'), $_); return $_; } function EducateSingleBackticks($_) { # # Parameter: String. # Returns: The string, with `backticks' -style single quotes # translated into HTML curly quote entities. # # Example input: `Isn't this fun?' # Example output: &#8216;Isn&#8217;t this fun?&#8217; # $_ = str_replace(array("`", "'",), array('&#8216;', '&#8217;'), $_); return $_; } function EducateDashes($_) { # # Parameter: String. # # Returns: The string, with each instance of "--" translated to # an em-dash HTML entity. # $_ = str_replace('--', '&#8212;', $_); return $_; } function EducateDashesOldSchool($_) { # # Parameter: String. # # Returns: The string, with each instance of "--" translated to # an en-dash HTML entity, and each "---" translated to # an em-dash HTML entity. 
# # em en $_ = str_replace(array("---", "--",), array('&#8212;', '&#8211;'), $_); return $_; } function EducateDashesOldSchoolInverted($_) { # # Parameter: String. # # Returns: The string, with each instance of "--" translated to # an em-dash HTML entity, and each "---" translated to # an en-dash HTML entity. Two reasons why: First, unlike the # en- and em-dash syntax supported by # EducateDashesOldSchool(), it's compatible with existing # entries written before SmartyPants 1.1, back when "--" was # only used for em-dashes. Second, em-dashes are more # common than en-dashes, and so it sort of makes sense that # the shortcut should be shorter to type. (Thanks to Aaron # Swartz for the idea.) # # en em $_ = str_replace(array("---", "--",), array('&#8211;', '&#8212;'), $_); return $_; } function EducateEllipses($_) { # # Parameter: String. # Returns: The string, with each instance of "..." translated to # an ellipsis HTML entity. Also converts the case where # there are spaces between the dots. # # Example input: Huh...? # Example output: Huh&#8230;? # $_ = str_replace(array("...", ". . .",), '&#8230;', $_); return $_; } function StupefyEntities($_) { # # Parameter: String. # Returns: The string, with each SmartyPants HTML entity translated to # its ASCII counterpart. # # Example input: &#8220;Hello &#8212; world.&#8221; # Example output: "Hello -- world." # # en-dash em-dash $_ = str_replace(array('&#8211;', '&#8212;'), array('-', '--'), $_); # single quote open close $_ = str_replace(array('&#8216;', '&#8217;'), "'", $_); # double quote open close $_ = str_replace(array('&#8220;', '&#8221;'), '"', $_); $_ = str_replace('&#8230;', '...', $_); # ellipsis return $_; } function ProcessEscapes($_) { # # Parameter: String. # Returns: The string, with after processing the following backslash # escape sequences. This is useful if you want to force a "dumb" # quote or other character to appear. # # Escape Value # ------ ----- # \\ &#92; # \" &#34; # \' &#39; # \. 
&#46; # \- &#45; # \` &#96; # $_ = str_replace( array('\\\\', '\"', "\'", '\.', '\-', '\`'), array('&#92;', '&#34;', '&#39;', '&#46;', '&#45;', '&#96;'), $_); return $_; } # _TokenizeHTML is shared between PHP SmartyPants and PHP Markdown. # We only define it if it is not already defined. if (!function_exists('_TokenizeHTML')) : function _TokenizeHTML($str) { # # Parameter: String containing HTML markup. # Returns: An array of the tokens comprising the input # string. Each token is either a tag (possibly with nested, # tags contained therein, such as <a href="<MTFoo>">, or a # run of text between tags. Each element of the array is a # two-element array; the first is either 'tag' or 'text'; # the second is the actual value. # # # Regular expression derived from the _tokenize() subroutine in # Brad Choate's MTRegex plugin. # <http://www.bradchoate.com/past/mtregex.php> # $index = 0; $tokens = array(); $match = '(?s:<!(?:--.*?--\s*)+>)|'. # comment '(?s:<\?.*?\?>)|'. # processing instruction # regular tags '(?:<[/!$]?[-a-zA-Z0-9:]+\b(?>[^"\'>]+|"[^"]*"|\'[^\']*\')*>)'; $parts = preg_split("{($match)}", $str, -1, PREG_SPLIT_DELIM_CAPTURE); foreach ($parts as $part) { if (++$index % 2 && $part != '') $tokens[] = array('text', $part); else $tokens[] = array('tag', $part); } return $tokens; } endif; /* PHP SmartyPants =============== Description ----------- This is a PHP translation of the original SmartyPants quote educator written in Perl by John Gruber. SmartyPants is a web publishing utility that translates plain ASCII punctuation characters into "smart" typographic punctuation HTML entities. 
SmartyPants can perform the following transformations: * Straight quotes (`"` and `'`) into "curly" quote HTML entities * Backticks-style quotes (` ``like this'' `) into "curly" quote HTML entities * Dashes (`--` and `---`) into en- and em-dash entities * Three consecutive dots (`...`) into an ellipsis entity SmartyPants does not modify characters within `<pre>`, `<code>`, `<kbd>`, `<script>`, or `<math>` tag blocks. Typically, these tags are used to display text where smart quotes and other "smart punctuation" would not be appropriate, such as source code or example markup. ### Backslash Escapes ### If you need to use literal straight quotes (or plain hyphens and periods), SmartyPants accepts the following backslash escape sequences to force non-smart punctuation. It does so by transforming the escape sequence into a decimal-encoded HTML entity: Escape Value Character ------ ----- --------- \\ &#92; \ \" &#34; " \' &#39; ' \. &#46; . \- &#45; - \` &#96; ` This is useful, for example, when you want to use straight quotes as foot and inch marks: 6'2" tall; a 17" iMac. Bugs ---- To file bug reports or feature requests (other than topics listed in the Caveats section above) please send email to: <[email protected]> If the bug involves quotes being curled the wrong way, please send example text to illustrate. ### Algorithmic Shortcomings ### One situation in which quotes will get curled the wrong way is when apostrophes are used at the start of leading contractions. For example: 'Twas the night before Christmas. In the case above, SmartyPants will turn the apostrophe into an opening single-quote, when in fact it should be a closing one. I don't think this problem can be solved in the general case -- every word processor I've tried gets this wrong as well. In such cases, it's best to use the proper HTML entity for closing single-quotes (`&#8217;`) by hand. 
Version History --------------- 1.5.1e (9 Dec 2005) * Corrected a bug that prevented special characters from being escaped. 1.5.1d (25 May 2005) * Corrected a small bug in `_TokenizeHTML` where a Doctype declaration was not seen as HTML (smart quotes where applied inside). 1.5.1c (13 Dec 2004) * Changed a regular expression in `_TokenizeHTML` that could lead to a segmentation fault with PHP 4.3.8 on Linux. 1.5.1b (6 Sep 2004) * Corrected a problem with quotes immediately following a dash with no space between: `Text--"quoted text"--text.` * PHP SmartyPants can now be used as a modifier by the Smarty template engine. Rename the file to "modifier.smartypants.php" and put it in your smarty plugins folder. * Replaced a lot of space characters by tabs, saving about 4 KB. 1.5.1a (30 Jun 2004) * PHP Markdown and PHP Smartypants now share the same `_TokenizeHTML` function when loaded simultanously. * Changed the internals of `_TokenizeHTML` to lower the PHP version requirement to PHP 4.0.5. 1.5.1 (6 Jun 2004) * Initial release of PHP SmartyPants, based on version 1.5.1 of the original SmartyPants written in Perl. Author ------ John Gruber <http://daringfireball.net/> Ported to PHP by Michel Fortin <http://www.michelf.com/> Additional Credits ------------------ Portions of this plug-in are based on Brad Choate's nifty MTRegex plug-in. Brad Choate also contributed a few bits of source code to this plug-in. Brad Choate is a fine hacker indeed. (<http://bradchoate.com/>) Jeremy Hedley (<http://antipixel.com/>) and Charles Wiltgen (<http://playbacktime.com/>) deserve mention for exemplary beta testing. Copyright and License --------------------- Copyright (c) 2003 John Gruber <http://daringfireball.net/> All rights reserved. 
Copyright (c) 2004-2005 Michel Fortin <http://www.michelf.com> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name "SmartyPants" nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. */ ?>
wangscript/libjingle-1
trunk/third_party/libvpx/source/libvpx/examples/includes/PHP-SmartyPants-1.5.1e/smartypants.php
PHP
bsd-3-clause
28,281
<!-- WPT test: images must load regardless of the MIME type (and of
     X-Content-Type-Options: nosniff), because image content is always
     sniffed by the browser. -->
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<div id=log></div>
<script>
// Note: images get always sniffed, nosniff doesn't do anything
// (but note the tentative Cross-Origin Read Blocking (CORB) tests
// - for example wpt/fetch/corb/img-mime-types-coverage.tentative.sub.html).
// Each entry is a Content-Type value the server fixture will send;
// null means "send no explicit type parameter".
var passes = [
  // Empty or non-sensical MIME types
  null,
  "",
  "x",
  "x/x",
  // Image MIME types
  "image/gif",
  "image/png",
  "image/png;blah",
  "image/svg+xml",
  // CORB-protected MIME types (but note that CORB doesn't apply here,
  // because CORB ignores same-origin requests).
  "text/html",
  "application/xml",
  "application/blah+xml"
]
// Build the fixture URL, passing the requested MIME type (if any) as a
// query parameter for resources/image.py to echo back as Content-Type.
const get_url = (mime) => {
  let url = "resources/image.py"
  if (mime != null) {
    url += "?type=" + encodeURIComponent(mime)
  }
  return url
}
passes.forEach(function(mime) {
  async_test(function(t) {
    var img = document.createElement("img")
    // The image must decode successfully: any error event fails the test.
    img.onerror = t.unreached_func("Unexpected error event")
    img.onload = t.step_func_done(function(){
      // 96 matches the fixture image's intrinsic width — presumably
      // resources/image.py serves a 96px-wide image; verify against fixture.
      assert_equals(img.width, 96)
    })
    img.src = get_url(mime)
    document.body.appendChild(img)
  }, "URL query: " + mime)
})
</script>
scheib/chromium
third_party/blink/web_tests/external/wpt/fetch/nosniff/image.html
HTML
bsd-3-clause
1,263
// Cross-browser helpers for reading and writing the caret/selection of a
// text input. Modern browsers use selectionStart/selectionEnd; the legacy
// branches fall back to the old IE TextRange API.
var inptSel = {};

// Return the current selection of input `el` as {begin, end}
// (character offsets). Falls back to {begin: 0, end: 0} when the
// selection cannot be determined.
inptSel.get = function (el) {
  // Standards path: selectionStart/selectionEnd are plain numbers.
  if (typeof el.selectionStart === "number") {
    return { begin: el.selectionStart, end: el.selectionEnd };
  }

  // Legacy IE path: derive offsets from a TextRange bookmark.
  var docRange = document.selection.createRange();
  if (docRange && docRange.parentElement() === el) {
    var startRange = el.createTextRange();
    var endRange = el.createTextRange();
    var valueLength = el.value.length;

    startRange.moveToBookmark(docRange.getBookmark());
    endRange.collapse(false);

    // Selection sits at the very end of the value.
    if (startRange.compareEndPoints("StartToEnd", endRange) > -1) {
      return { begin: valueLength, end: valueLength };
    }

    // moveStart/moveEnd return the (negative) distance actually moved,
    // which negated gives the character offset from the start.
    return {
      begin: -startRange.moveStart("character", -valueLength),
      end: -startRange.moveEnd("character", -valueLength)
    };
  }

  return { begin: 0, end: 0 };
};

// Set the selection of input `el`. `pos` is either a single caret
// offset (number) or a {begin, end} range object.
inptSel.set = function (el, pos) {
  // Normalize a bare offset into a collapsed range.
  if (typeof pos !== "object") {
    pos = { begin: pos, end: pos };
  }

  if (el.setSelectionRange) {
    // Standards path.
    el.focus();
    el.setSelectionRange(pos.begin, pos.end);
  } else if (el.createTextRange) {
    // Legacy IE TextRange path.
    var range = el.createTextRange();
    range.collapse(true);
    range.moveEnd("character", pos.end);
    range.moveStart("character", pos.begin);
    range.select();
  }
};

module.exports = inptSel;
sajochiu/cdnjs
ajax/libs/formatter.js/0.1.5/common/inpt-sel.min.js
JavaScript
mit
760
<?php
/**
 * @copyright  Copyright (C) 2005 - 2012 Open Source Matters, Inc. All rights reserved.
 * @license    GNU General Public License version 2 or later; see LICENSE.txt
 */

// No direct access
defined('_JEXEC') or die;

jimport('joomla.application.component.view');

/**
 * HTML View class for the WebLinks component
 *
 * Resolves a single weblink item and redirects the visitor to its
 * target URL (the view never renders the item itself, except in the
 * 'edit' layout case).
 *
 * @package     Joomla.Site
 * @subpackage  com_weblinks
 * @since       1.5
 */
class WeblinksViewWeblink extends JView
{
	// Model state object (populated by the model in display()).
	protected $state;

	// The weblink item being viewed.
	protected $item;

	/**
	 * Display the view: either hand off to the edit layout, or redirect
	 * the browser to the weblink's target URL.
	 *
	 * @param   string  $tpl  The name of the template file to parse.
	 */
	function display($tpl = null)
	{
		$app	= JFactory::getApplication();
		$params	= $app->getParams();

		// Get some data from the models
		// NOTE(review): $params, $state and $category are not used below;
		// presumably the get() calls are kept for their model side effects
		// (state population) — confirm before removing.
		$state		= $this->get('State');
		$item		= $this->get('Item');
		$category	= $this->get('Category');

		if ($this->getLayout() == 'edit')
		{
			// NOTE(review): _displayEdit is not defined in this class;
			// presumably provided by a parent/legacy layer — confirm.
			$this->_displayEdit($tpl);
			return;
		}

		if ($item->url)
		{
			// redirects to url if matching id found
			$app->redirect($item->url);
		}
		else
		{
			//TODO create proper error handling
			$app->redirect(JRoute::_('index.php'), JText::_('COM_WEBLINKS_ERROR_WEBLINK_NOT_FOUND'), 'notice');
		}
	}
}
SuperFamousGuy/AdLib
tmp/install_4f31d69ae5bd9/components/com_weblinks/views/weblink/view.html.php
PHP
gpl-2.0
1,061
<?php

/**
 * @file
 * Contains \Drupal\Console\Command\Generate\FormBaseCommand.
 */

namespace Drupal\Console\Command\Generate;

/**
 * Console command that generates a form extending Drupal's FormBase.
 *
 * All generation logic lives in the parent FormCommand; this subclass
 * only selects the form type and the command name.
 */
class FormBaseCommand extends FormCommand
{
    /**
     * {@inheritdoc}
     */
    protected function configure()
    {
        // Generate FormBase-derived forms, exposed as "generate:form".
        $this->setFormType('FormBase');
        $this->setCommandName('generate:form');
        parent::configure();
    }
}
parkerjgit/openarch-d8
vendor/drupal/console/src/Command/Generate/FormBaseCommand.php
PHP
gpl-2.0
340
<?php

/**
 * @package utf8
 * @subpackage strings
 */

/**
 * Define UTF8_CORE as required
 */
if (!defined('UTF8_CORE')) {
    define('UTF8_CORE', TRUE);
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware string length (characters, not bytes).
 * Thin wrapper around mb_strlen; assumes mb_internal_encoding is
 * already set to UTF-8. Bad bytes in the string are not counted -
 * they are simply ignored.
 * @param string UTF-8 string
 * @return int number of UTF-8 characters in string
 * @package utf8
 * @subpackage strings
 */
function utf8_strlen($str) {
    return mb_strlen($str);
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware strpos: position of the first occurrence of a string.
 * Wrapper around mb_strpos; assumes mbstring internal encoding is UTF-8.
 * @param string haystack
 * @param string needle (you should validate this with utf8_is_valid)
 * @param integer offset in characters (from left)
 * @return mixed integer position or FALSE on failure
 * @package utf8
 * @subpackage strings
 */
function utf8_strpos($str, $search, $offset = FALSE) {
    return ($offset === FALSE)
        ? mb_strpos($str, $search)
        : mb_strpos($str, $search, $offset);
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware strrpos: position of the last occurrence of a string.
 * Wrapper around mb_strrpos; assumes mbstring internal encoding is UTF-8.
 * @param string haystack
 * @param string needle (you should validate this with utf8_is_valid)
 * @param integer (optional) offset (from left)
 * @return mixed integer position or FALSE on failure
 * @package utf8
 * @subpackage strings
 */
function utf8_strrpos($str, $search, $offset = FALSE) {
    if ($offset === FALSE) {
        # Emulate behaviour of strrpos rather than raising warning
        return empty($str) ? FALSE : mb_strrpos($str, $search);
    }

    if (!is_int($offset)) {
        trigger_error('utf8_strrpos expects parameter 3 to be long', E_USER_WARNING);
        return FALSE;
    }

    # Search only the substring starting at $offset, then translate the
    # match position back into coordinates of the full string.
    $tail = mb_substr($str, $offset);
    $pos = mb_strrpos($tail, $search);

    return ($pos === FALSE) ? FALSE : $pos + $offset;
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware substr: part of a string given a character offset
 * (and optionally a length).
 * Wrapper around mb_substr; assumes mbstring internal encoding is UTF-8.
 * @param string
 * @param integer number of UTF-8 characters offset (from left)
 * @param integer (optional) length in UTF-8 characters from offset
 * @return mixed string or FALSE if failure
 * @package utf8
 * @subpackage strings
 */
function utf8_substr($str, $offset, $length = FALSE) {
    return ($length === FALSE)
        ? mb_substr($str, $offset)
        : mb_substr($str, $offset, $length);
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware strtolower.
 * Wrapper around mb_strtolower; assumes mbstring internal encoding is UTF-8.
 * Note: the concept of a character's "case" only exists in some alphabets
 * such as Latin, Greek, Cyrillic, Armenian and archaic Georgian - it does
 * not exist in the Chinese alphabet, for example. See Unicode Standard
 * Annex #21: Case Mappings.
 * @param string
 * @return mixed either string in lowercase or FALSE is UTF-8 invalid
 * @package utf8
 * @subpackage strings
 */
function utf8_strtolower($str) {
    return mb_strtolower($str);
}

//--------------------------------------------------------------------
/**
 * UTF-8 aware strtoupper.
 * Wrapper around mb_strtoupper; assumes mbstring internal encoding is UTF-8.
 * Note: the concept of a character's "case" only exists in some alphabets
 * such as Latin, Greek, Cyrillic, Armenian and archaic Georgian - it does
 * not exist in the Chinese alphabet, for example. See Unicode Standard
 * Annex #21: Case Mappings.
 * @param string
 * @return mixed either string in lowercase or FALSE is UTF-8 invalid
 * @package utf8
 * @subpackage strings
 */
function utf8_strtoupper($str) {
    return mb_strtoupper($str);
}
SuperFamousGuy/AdLib
tmp/install_4f31d69ae5bd9/libraries/phputf8/mbstring/core.php
PHP
gpl-2.0
4,187
/* * arch/arm/mach-at91/at91sam9260.c * * Copyright (C) 2006 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <linux/pm.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/cpu.h> #include <mach/at91_dbgu.h> #include <mach/at91sam9260.h> #include <mach/at91_pmc.h> #include <mach/at91_rstc.h> #include <mach/at91_shdwc.h> #include "soc.h" #include "generic.h" #include "clock.h" /* -------------------------------------------------------------------- * Clocks * -------------------------------------------------------------------- */ /* * The peripheral clocks. */ static struct clk pioA_clk = { .name = "pioA_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOA, .type = CLK_TYPE_PERIPHERAL, }; static struct clk pioB_clk = { .name = "pioB_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOB, .type = CLK_TYPE_PERIPHERAL, }; static struct clk pioC_clk = { .name = "pioC_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk adc_clk = { .name = "adc_clk", .pmc_mask = 1 << AT91SAM9260_ID_ADC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart0_clk = { .name = "usart0_clk", .pmc_mask = 1 << AT91SAM9260_ID_US0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart1_clk = { .name = "usart1_clk", .pmc_mask = 1 << AT91SAM9260_ID_US1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart2_clk = { .name = "usart2_clk", .pmc_mask = 1 << AT91SAM9260_ID_US2, .type = CLK_TYPE_PERIPHERAL, }; static struct clk mmc_clk = { .name = "mci_clk", .pmc_mask = 1 << AT91SAM9260_ID_MCI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk udc_clk = { .name = "udc_clk", .pmc_mask = 1 << AT91SAM9260_ID_UDP, .type = CLK_TYPE_PERIPHERAL, }; static struct clk twi_clk = { .name = "twi_clk", 
.pmc_mask = 1 << AT91SAM9260_ID_TWI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk spi0_clk = { .name = "spi0_clk", .pmc_mask = 1 << AT91SAM9260_ID_SPI0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk spi1_clk = { .name = "spi1_clk", .pmc_mask = 1 << AT91SAM9260_ID_SPI1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk ssc_clk = { .name = "ssc_clk", .pmc_mask = 1 << AT91SAM9260_ID_SSC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc0_clk = { .name = "tc0_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc1_clk = { .name = "tc1_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc2_clk = { .name = "tc2_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC2, .type = CLK_TYPE_PERIPHERAL, }; static struct clk ohci_clk = { .name = "ohci_clk", .pmc_mask = 1 << AT91SAM9260_ID_UHP, .type = CLK_TYPE_PERIPHERAL, }; static struct clk macb_clk = { .name = "macb_clk", .pmc_mask = 1 << AT91SAM9260_ID_EMAC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk isi_clk = { .name = "isi_clk", .pmc_mask = 1 << AT91SAM9260_ID_ISI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart3_clk = { .name = "usart3_clk", .pmc_mask = 1 << AT91SAM9260_ID_US3, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart4_clk = { .name = "usart4_clk", .pmc_mask = 1 << AT91SAM9260_ID_US4, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart5_clk = { .name = "usart5_clk", .pmc_mask = 1 << AT91SAM9260_ID_US5, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc3_clk = { .name = "tc3_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC3, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc4_clk = { .name = "tc4_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC4, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc5_clk = { .name = "tc5_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC5, .type = CLK_TYPE_PERIPHERAL, }; static struct clk *periph_clocks[] __initdata = { &pioA_clk, &pioB_clk, &pioC_clk, &adc_clk, &usart0_clk, &usart1_clk, &usart2_clk, 
&mmc_clk, &udc_clk, &twi_clk, &spi0_clk, &spi1_clk, &ssc_clk, &tc0_clk, &tc1_clk, &tc2_clk, &ohci_clk, &macb_clk, &isi_clk, &usart3_clk, &usart4_clk, &usart5_clk, &tc3_clk, &tc4_clk, &tc5_clk, // irq0 .. irq2 }; static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk), CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk), CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk), CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk), }; static struct clk_lookup usart_clocks_lookups[] = { CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck), CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk), }; /* * The two programmable clocks. * You must configure pin multiplexing to bring these signals out. 
*/ static struct clk pck0 = { .name = "pck0", .pmc_mask = AT91_PMC_PCK0, .type = CLK_TYPE_PROGRAMMABLE, .id = 0, }; static struct clk pck1 = { .name = "pck1", .pmc_mask = AT91_PMC_PCK1, .type = CLK_TYPE_PROGRAMMABLE, .id = 1, }; static void __init at91sam9260_register_clocks(void) { int i; for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) clk_register(periph_clocks[i]); clkdev_add_table(periph_clocks_lookups, ARRAY_SIZE(periph_clocks_lookups)); clkdev_add_table(usart_clocks_lookups, ARRAY_SIZE(usart_clocks_lookups)); clk_register(&pck0); clk_register(&pck1); } static struct clk_lookup console_clock_lookup; void __init at91sam9260_set_console_clock(int id) { if (id >= ARRAY_SIZE(usart_clocks_lookups)) return; console_clock_lookup.con_id = "usart"; console_clock_lookup.clk = usart_clocks_lookups[id].clk; clkdev_add(&console_clock_lookup); } /* -------------------------------------------------------------------- * GPIO * -------------------------------------------------------------------- */ static struct at91_gpio_bank at91sam9260_gpio[] = { { .id = AT91SAM9260_ID_PIOA, .offset = AT91_PIOA, .clock = &pioA_clk, }, { .id = AT91SAM9260_ID_PIOB, .offset = AT91_PIOB, .clock = &pioB_clk, }, { .id = AT91SAM9260_ID_PIOC, .offset = AT91_PIOC, .clock = &pioC_clk, } }; static void at91sam9260_poweroff(void) { at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW); } /* -------------------------------------------------------------------- * AT91SAM9260 processor initialization * -------------------------------------------------------------------- */ static void __init at91sam9xe_map_io(void) { unsigned long sram_size; switch (at91_soc_initdata.cidr & AT91_CIDR_SRAMSIZ) { case AT91_CIDR_SRAMSIZ_32K: sram_size = 2 * SZ_16K; break; case AT91_CIDR_SRAMSIZ_16K: default: sram_size = SZ_16K; } at91_init_sram(0, AT91SAM9XE_SRAM_BASE, sram_size); } static void __init at91sam9260_map_io(void) { if (cpu_is_at91sam9xe()) { at91sam9xe_map_io(); } else if (cpu_is_at91sam9g20()) { 
at91_init_sram(0, AT91SAM9G20_SRAM0_BASE, AT91SAM9G20_SRAM0_SIZE); at91_init_sram(1, AT91SAM9G20_SRAM1_BASE, AT91SAM9G20_SRAM1_SIZE); } else { at91_init_sram(0, AT91SAM9260_SRAM0_BASE, AT91SAM9260_SRAM0_SIZE); at91_init_sram(1, AT91SAM9260_SRAM1_BASE, AT91SAM9260_SRAM1_SIZE); } } static void __init at91sam9260_initialize(void) { at91_arch_reset = at91sam9_alt_reset; pm_power_off = at91sam9260_poweroff; at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1) | (1 << AT91SAM9260_ID_IRQ2); /* Register GPIO subsystem */ at91_gpio_init(at91sam9260_gpio, 3); } /* -------------------------------------------------------------------- * Interrupt initialization * -------------------------------------------------------------------- */ /* * The default interrupt priority levels (0 = lowest, 7 = highest). */ static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = { 7, /* Advanced Interrupt Controller */ 7, /* System Peripherals */ 1, /* Parallel IO Controller A */ 1, /* Parallel IO Controller B */ 1, /* Parallel IO Controller C */ 0, /* Analog-to-Digital Converter */ 5, /* USART 0 */ 5, /* USART 1 */ 5, /* USART 2 */ 0, /* Multimedia Card Interface */ 2, /* USB Device Port */ 6, /* Two-Wire Interface */ 5, /* Serial Peripheral Interface 0 */ 5, /* Serial Peripheral Interface 1 */ 5, /* Serial Synchronous Controller */ 0, 0, 0, /* Timer Counter 0 */ 0, /* Timer Counter 1 */ 0, /* Timer Counter 2 */ 2, /* USB Host port */ 3, /* Ethernet */ 0, /* Image Sensor Interface */ 5, /* USART 3 */ 5, /* USART 4 */ 5, /* USART 5 */ 0, /* Timer Counter 3 */ 0, /* Timer Counter 4 */ 0, /* Timer Counter 5 */ 0, /* Advanced Interrupt Controller */ 0, /* Advanced Interrupt Controller */ 0, /* Advanced Interrupt Controller */ }; struct at91_init_soc __initdata at91sam9260_soc = { .map_io = at91sam9260_map_io, .default_irq_priority = at91sam9260_default_irq_priority, .register_clocks = at91sam9260_register_clocks, .init = at91sam9260_initialize, };
EmbeddedAndroid/linaro-android-3.1
arch/arm/mach-at91/at91sam9260.c
C
gpl-2.0
9,651
// Copyright David Abrahams 2002. // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef CLASS_DWA200216_HPP # define CLASS_DWA200216_HPP # include <boost/python/detail/prefix.hpp> # include <boost/noncopyable.hpp> # include <boost/python/class_fwd.hpp> # include <boost/python/object/class.hpp> # include <boost/python/object.hpp> # include <boost/python/type_id.hpp> # include <boost/python/data_members.hpp> # include <boost/python/make_function.hpp> # include <boost/python/signature.hpp> # include <boost/python/init.hpp> # include <boost/python/args_fwd.hpp> # include <boost/python/object/class_metadata.hpp> # include <boost/python/object/pickle_support.hpp> # include <boost/python/object/add_to_namespace.hpp> # include <boost/python/detail/overloads_fwd.hpp> # include <boost/python/detail/operator_id.hpp> # include <boost/python/detail/def_helper.hpp> # include <boost/python/detail/force_instantiate.hpp> # include <boost/python/detail/unwrap_type_id.hpp> # include <boost/python/detail/unwrap_wrapper.hpp> # include <boost/type_traits/is_same.hpp> # include <boost/type_traits/is_member_function_pointer.hpp> # include <boost/type_traits/is_polymorphic.hpp> # include <boost/mpl/size.hpp> # include <boost/mpl/for_each.hpp> # include <boost/mpl/bool.hpp> # include <boost/mpl/not.hpp> # include <boost/detail/workaround.hpp> # if BOOST_WORKAROUND(__MWERKS__, <= 0x3004) \ /* pro9 reintroduced the bug */ \ || (BOOST_WORKAROUND(__MWERKS__, > 0x3100) \ && BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3201))) # define BOOST_PYTHON_NO_MEMBER_POINTER_ORDERING 1 # endif # ifdef BOOST_PYTHON_NO_MEMBER_POINTER_ORDERING # include <boost/mpl/and.hpp> # include <boost/type_traits/is_member_pointer.hpp> # endif namespace boost { namespace python { template <class DerivedVisitor> class def_visitor; enum no_init_t { no_init }; namespace detail { // This function object is used 
with mpl::for_each to write the id // of the type a pointer to which is passed as its 2nd compile-time // argument. into the iterator pointed to by its runtime argument struct write_type_id { write_type_id(type_info**p) : p(p) {} // Here's the runtime behavior template <class T> void operator()(T*) const { *(*p)++ = type_id<T>(); } type_info** p; }; template <class T> struct is_data_member_pointer : mpl::and_< is_member_pointer<T> , mpl::not_<is_member_function_pointer<T> > > {}; # ifdef BOOST_PYTHON_NO_MEMBER_POINTER_ORDERING # define BOOST_PYTHON_DATA_MEMBER_HELPER(D) , detail::is_data_member_pointer<D>() # define BOOST_PYTHON_YES_DATA_MEMBER , mpl::true_ # define BOOST_PYTHON_NO_DATA_MEMBER , mpl::false_ # elif defined(BOOST_NO_FUNCTION_TEMPLATE_ORDERING) # define BOOST_PYTHON_DATA_MEMBER_HELPER(D) , 0 # define BOOST_PYTHON_YES_DATA_MEMBER , int # define BOOST_PYTHON_NO_DATA_MEMBER , ... # else # define BOOST_PYTHON_DATA_MEMBER_HELPER(D) # define BOOST_PYTHON_YES_DATA_MEMBER # define BOOST_PYTHON_NO_DATA_MEMBER # endif namespace error { // // A meta-assertion mechanism which prints nice error messages and // backtraces on lots of compilers. Usage: // // assertion<C>::failed // // where C is an MPL metafunction class // template <class C> struct assertion_failed { }; template <class C> struct assertion_ok { typedef C failed; }; template <class C> struct assertion : mpl::if_<C, assertion_ok<C>, assertion_failed<C> >::type {}; // // Checks for validity of arguments used to define virtual // functions with default implementations. 
// template <class Default> void not_a_derived_class_member(Default) {} template <class T, class Fn> struct virtual_function_default { template <class Default> static void must_be_derived_class_member(Default const&) { // https://svn.boost.org/trac/boost/ticket/5803 //typedef typename assertion<mpl::not_<is_same<Default,Fn> > >::failed test0; # if !BOOST_WORKAROUND(__MWERKS__, <= 0x2407) typedef typename assertion<is_polymorphic<T> >::failed test1 BOOST_ATTRIBUTE_UNUSED; # endif typedef typename assertion<is_member_function_pointer<Fn> >::failed test2 BOOST_ATTRIBUTE_UNUSED; not_a_derived_class_member<Default>(Fn()); } }; } } // This is the primary mechanism through which users will expose // C++ classes to Python. template < class W // class being wrapped , class X1 // = detail::not_specified , class X2 // = detail::not_specified , class X3 // = detail::not_specified > class class_ : public objects::class_base { public: // types typedef objects::class_base base; typedef class_<W,X1,X2,X3> self; typedef typename objects::class_metadata<W,X1,X2,X3> metadata; typedef W wrapped_type; private: // types // A helper class which will contain an array of id objects to be // passed to the base class constructor struct id_vector { typedef typename metadata::bases bases; id_vector() { // Stick the derived class id into the first element of the array ids[0] = detail::unwrap_type_id((W*)0, (W*)0); // Write the rest of the elements into succeeding positions. 
type_info* p = ids + 1; mpl::for_each(detail::write_type_id(&p), (bases*)0, (add_pointer<mpl::_>*)0); } BOOST_STATIC_CONSTANT( std::size_t, size = mpl::size<bases>::value + 1); type_info ids[size]; }; friend struct id_vector; public: // constructors // Construct with the class name, with or without docstring, and default __init__() function class_(char const* name, char const* doc = 0); // Construct with class name, no docstring, and an uncallable __init__ function class_(char const* name, no_init_t); // Construct with class name, docstring, and an uncallable __init__ function class_(char const* name, char const* doc, no_init_t); // Construct with class name and init<> function template <class DerivedT> inline class_(char const* name, init_base<DerivedT> const& i) : base(name, id_vector::size, id_vector().ids) { this->initialize(i); } // Construct with class name, docstring and init<> function template <class DerivedT> inline class_(char const* name, char const* doc, init_base<DerivedT> const& i) : base(name, id_vector::size, id_vector().ids, doc) { this->initialize(i); } public: // member functions // Generic visitation template <class Derived> self& def(def_visitor<Derived> const& visitor) { visitor.visit(*this); return *this; } // Wrap a member function or a non-member function which can take // a T, T cv&, or T cv* as its first parameter, a callable // python object, or a generic visitor. 
template <class F> self& def(char const* name, F f) { this->def_impl( detail::unwrap_wrapper((W*)0) , name, f, detail::def_helper<char const*>(0), &f); return *this; } template <class A1, class A2> self& def(char const* name, A1 a1, A2 const& a2) { this->def_maybe_overloads(name, a1, a2, &a2); return *this; } template <class Fn, class A1, class A2> self& def(char const* name, Fn fn, A1 const& a1, A2 const& a2) { // The arguments are definitely: // def(name, function, policy, doc_string) // def(name, function, doc_string, policy) this->def_impl( detail::unwrap_wrapper((W*)0) , name, fn , detail::def_helper<A1,A2>(a1,a2) , &fn); return *this; } template <class Fn, class A1, class A2, class A3> self& def(char const* name, Fn fn, A1 const& a1, A2 const& a2, A3 const& a3) { this->def_impl( detail::unwrap_wrapper((W*)0) , name, fn , detail::def_helper<A1,A2,A3>(a1,a2,a3) , &fn); return *this; } // // Data member access // template <class D> self& def_readonly(char const* name, D const& d, char const* doc=0) { return this->def_readonly_impl(name, d, doc BOOST_PYTHON_DATA_MEMBER_HELPER(D)); } template <class D> self& def_readwrite(char const* name, D const& d, char const* doc=0) { return this->def_readwrite_impl(name, d, doc BOOST_PYTHON_DATA_MEMBER_HELPER(D)); } template <class D> self& def_readonly(char const* name, D& d, char const* doc=0) { return this->def_readonly_impl(name, d, doc BOOST_PYTHON_DATA_MEMBER_HELPER(D)); } template <class D> self& def_readwrite(char const* name, D& d, char const* doc=0) { return this->def_readwrite_impl(name, d, doc BOOST_PYTHON_DATA_MEMBER_HELPER(D)); } // Property creation template <class Get> self& add_property(char const* name, Get fget, char const* docstr = 0) { base::add_property(name, this->make_getter(fget), docstr); return *this; } template <class Get, class Set> self& add_property(char const* name, Get fget, Set fset, char const* docstr = 0) { base::add_property( name, this->make_getter(fget), this->make_setter(fset), docstr); 
return *this; } template <class Get> self& add_static_property(char const* name, Get fget) { base::add_static_property(name, object(fget)); return *this; } template <class Get, class Set> self& add_static_property(char const* name, Get fget, Set fset) { base::add_static_property(name, object(fget), object(fset)); return *this; } template <class U> self& setattr(char const* name, U const& x) { this->base::setattr(name, object(x)); return *this; } // Pickle support template <typename PickleSuiteType> self& def_pickle(PickleSuiteType const& x) { error_messages::must_be_derived_from_pickle_suite(x); detail::pickle_suite_finalize<PickleSuiteType>::register_( *this, &PickleSuiteType::getinitargs, &PickleSuiteType::getstate, &PickleSuiteType::setstate, PickleSuiteType::getstate_manages_dict()); return *this; } self& enable_pickling() { this->base::enable_pickling_(false); return *this; } self& staticmethod(char const* name) { this->make_method_static(name); return *this; } private: // helper functions // Builds a method for this class around the given [member] // function pointer or object, appropriately adjusting the type of // the first signature argument so that if f is a member of a // (possibly not wrapped) base class of T, an lvalue argument of // type T will be required. 
// // @group PropertyHelpers { template <class F> object make_getter(F f) { typedef typename api::is_object_operators<F>::type is_obj_or_proxy; return this->make_fn_impl( detail::unwrap_wrapper((W*)0) , f, is_obj_or_proxy(), (char*)0, detail::is_data_member_pointer<F>() ); } template <class F> object make_setter(F f) { typedef typename api::is_object_operators<F>::type is_obj_or_proxy; return this->make_fn_impl( detail::unwrap_wrapper((W*)0) , f, is_obj_or_proxy(), (int*)0, detail::is_data_member_pointer<F>() ); } template <class T, class F> object make_fn_impl(T*, F const& f, mpl::false_, void*, mpl::false_) { return python::make_function(f, default_call_policies(), detail::get_signature(f, (T*)0)); } template <class T, class D, class B> object make_fn_impl(T*, D B::*pm_, mpl::false_, char*, mpl::true_) { D T::*pm = pm_; return python::make_getter(pm); } template <class T, class D, class B> object make_fn_impl(T*, D B::*pm_, mpl::false_, int*, mpl::true_) { D T::*pm = pm_; return python::make_setter(pm); } template <class T, class F> object make_fn_impl(T*, F const& x, mpl::true_, void*, mpl::false_) { return x; } // } template <class D, class B> self& def_readonly_impl( char const* name, D B::*pm_, char const* doc BOOST_PYTHON_YES_DATA_MEMBER) { return this->add_property(name, pm_, doc); } template <class D, class B> self& def_readwrite_impl( char const* name, D B::*pm_, char const* doc BOOST_PYTHON_YES_DATA_MEMBER) { return this->add_property(name, pm_, pm_, doc); } template <class D> self& def_readonly_impl( char const* name, D& d, char const* BOOST_PYTHON_NO_DATA_MEMBER) { return this->add_static_property(name, python::make_getter(d)); } template <class D> self& def_readwrite_impl( char const* name, D& d, char const* BOOST_PYTHON_NO_DATA_MEMBER) { return this->add_static_property(name, python::make_getter(d), python::make_setter(d)); } template <class DefVisitor> inline void initialize(DefVisitor const& i) { metadata::register_(); // set up runtime 
metadata/conversions typedef typename metadata::holder holder; this->set_instance_size( objects::additional_instance_size<holder>::value ); this->def(i); } inline void initialize(no_init_t) { metadata::register_(); // set up runtime metadata/conversions this->def_no_init(); } // // These two overloads discriminate between def() as applied to a // generic visitor and everything else. // // @group def_impl { template <class T, class Helper, class LeafVisitor, class Visitor> inline void def_impl( T* , char const* name , LeafVisitor , Helper const& helper , def_visitor<Visitor> const* v ) { v->visit(*this, name, helper); } template <class T, class Fn, class Helper> inline void def_impl( T* , char const* name , Fn fn , Helper const& helper , ... ) { objects::add_to_namespace( *this , name , make_function( fn , helper.policies() , helper.keywords() , detail::get_signature(fn, (T*)0) ) , helper.doc() ); this->def_default(name, fn, helper, mpl::bool_<Helper::has_default_implementation>()); } // } // // These two overloads handle the definition of default // implementation overloads for virtual functions. The second one // handles the case where no default implementation was specified. // // @group def_default { template <class Fn, class Helper> inline void def_default( char const* name , Fn , Helper const& helper , mpl::bool_<true>) { detail::error::virtual_function_default<W,Fn>::must_be_derived_class_member( helper.default_implementation()); objects::add_to_namespace( *this, name, make_function( helper.default_implementation(), helper.policies(), helper.keywords()) ); } template <class Fn, class Helper> inline void def_default(char const*, Fn, Helper const&, mpl::bool_<false>) { } // } // // These two overloads discriminate between def() as applied to // regular functions and def() as applied to the result of // BOOST_PYTHON_FUNCTION_OVERLOADS(). The final argument is used to // discriminate. 
// // @group def_maybe_overloads { template <class OverloadsT, class SigT> void def_maybe_overloads( char const* name , SigT sig , OverloadsT const& overloads , detail::overloads_base const*) { // convert sig to a type_list (see detail::get_signature in signature.hpp) // before calling detail::define_with_defaults. detail::define_with_defaults( name, overloads, *this, detail::get_signature(sig)); } template <class Fn, class A1> void def_maybe_overloads( char const* name , Fn fn , A1 const& a1 , ...) { this->def_impl( detail::unwrap_wrapper((W*)0) , name , fn , detail::def_helper<A1>(a1) , &fn ); } // } }; // // implementations // template <class W, class X1, class X2, class X3> inline class_<W,X1,X2,X3>::class_(char const* name, char const* doc) : base(name, id_vector::size, id_vector().ids, doc) { this->initialize(init<>()); // select_holder::assert_default_constructible(); } template <class W, class X1, class X2, class X3> inline class_<W,X1,X2,X3>::class_(char const* name, no_init_t) : base(name, id_vector::size, id_vector().ids) { this->initialize(no_init); } template <class W, class X1, class X2, class X3> inline class_<W,X1,X2,X3>::class_(char const* name, char const* doc, no_init_t) : base(name, id_vector::size, id_vector().ids, doc) { this->initialize(no_init); } }} // namespace boost::python # undef BOOST_PYTHON_DATA_MEMBER_HELPER # undef BOOST_PYTHON_YES_DATA_MEMBER # undef BOOST_PYTHON_NO_DATA_MEMBER # undef BOOST_PYTHON_NO_MEMBER_POINTER_ORDERING #endif // CLASS_DWA200216_HPP
snakeleon/YouCompleteMe-x86
third_party/ycmd/cpp/BoostParts/boost/python/class.hpp
C++
gpl-3.0
17,984
{% extends 'admin/master.html' %} {% block body %} Hello World from AnotherMyAdmin!<br/> <a href="{{ url_for('.test') }}">Click me to go to test view</a> {% endblock %}
radioprotector/flask-admin
examples/simple/templates/anotheradmin.html
HTML
bsd-3-clause
177
// Copyright 2012 SocialCode. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package gelf import ( "bytes" "compress/gzip" "compress/zlib" "encoding/json" "fmt" "io" "net" "strings" "sync" ) type Reader struct { mu sync.Mutex conn net.Conn } func NewReader(addr string) (*Reader, error) { var err error udpAddr, err := net.ResolveUDPAddr("udp", addr) if err != nil { return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) } conn, err := net.ListenUDP("udp", udpAddr) if err != nil { return nil, fmt.Errorf("ListenUDP: %s", err) } r := new(Reader) r.conn = conn return r, nil } func (r *Reader) Addr() string { return r.conn.LocalAddr().String() } // FIXME: this will discard data if p isn't big enough to hold the // full message. func (r *Reader) Read(p []byte) (int, error) { msg, err := r.ReadMessage() if err != nil { return -1, err } var data string if msg.Full == "" { data = msg.Short } else { data = msg.Full } return strings.NewReader(data).Read(p) } func (r *Reader) ReadMessage() (*Message, error) { cBuf := make([]byte, ChunkSize) var ( err error n, length int cid, ocid []byte seq, total uint8 cHead []byte cReader io.Reader chunks [][]byte ) for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { if n, err = r.conn.Read(cBuf); err != nil { return nil, fmt.Errorf("Read: %s", err) } cHead, cBuf = cBuf[:2], cBuf[:n] if bytes.Equal(cHead, magicChunked) { //fmt.Printf("chunked %v\n", cBuf[:14]) cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] if ocid != nil && !bytes.Equal(cid, ocid) { return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) } else if ocid == nil { ocid = cid chunks = make([][]byte, total) } n = len(cBuf) - chunkedHeaderLen //fmt.Printf("setting chunks[%d]: %d\n", seq, n) chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) 
length += n } else { //not chunked if total > 0 { return nil, fmt.Errorf("out-of-band message (not chunked)") } break } } //fmt.Printf("\nchunks: %v\n", chunks) if length > 0 { if cap(cBuf) < length { cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) } cBuf = cBuf[:0] for i := range chunks { //fmt.Printf("appending %d %v\n", i, chunks[i]) cBuf = append(cBuf, chunks[i]...) } cHead = cBuf[:2] } // the data we get from the wire is compressed if bytes.Equal(cHead, magicGzip) { cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) } else if cHead[0] == magicZlib[0] && (int(cHead[0])*256+int(cHead[1]))%31 == 0 { // zlib is slightly more complicated, but correct cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) } else { // compliance with https://github.com/Graylog2/graylog2-server // treating all messages as uncompressed if they are not gzip, zlib or // chunked cReader = bytes.NewReader(cBuf) } if err != nil { return nil, fmt.Errorf("NewReader: %s", err) } msg := new(Message) if err := json.NewDecoder(cReader).Decode(&msg); err != nil { return nil, fmt.Errorf("json.Unmarshal: %s", err) } return msg, nil }
kerneltime/docker
vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go
GO
apache-2.0
3,244
/** * Generated bundle index. Do not edit. */ export * from './public_api'; export { _document as ɵb, errorHandler as ɵa } from './src/browser'; export { GenericBrowserDomAdapter as ɵh } from './src/browser/generic_browser_adapter'; export { SERVER_TRANSITION_PROVIDERS as ɵg, bootstrapListenerFactory as ɵf } from './src/browser/server-transition'; export { _createNgProbe as ɵc } from './src/dom/debug/ng_probe'; export { EventManagerPlugin as ɵd } from './src/dom/events/event_manager'; export { DomSanitizerImpl as ɵe } from './src/security/dom_sanitization_service';
kujotx/angular4-template
src/angular4/node_modules/@angular/platform-browser/platform-browser.d.ts
TypeScript
mit
582
(function ($) { Drupal.behaviors.quizQuestionBrowserBehavior = { attach: function(context, settings) { // $('div.checkbox div.form-item').hide(); var $cell = $('div.mark-doubtful'); var $checkbox = $(':checkbox', $cell); var $switch = $('.toggle', $cell); if ($checkbox.is(':checked')) { $switch.toggleClass('off'); } $switch.click(function() { $checkbox.click(); }); $checkbox.click(function() { $switch.toggleClass('off'); }); } }; })(jQuery);
shashikantgupta/quiz
sites/all/modules/quiz/theme/quiz_take.js
JavaScript
gpl-2.0
522
<?python hide_navigation = True ?> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:py="http://genshi.edgewall.org/"><xi:include href="layout.html" /> <head> <title>Action Not Found</title> </head> <body> <h1>Action “${action}” Not Found</h1> <p>The requested action does not exist.</p> <p>Try to <a href="?">access the same URL</a> without parameters.</p> </body> </html>
jackTheRipper/iotrussia
web_server/lib/werkzeug-master/examples/simplewiki/templates/missing_action.html
HTML
gpl-2.0
450
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package macho implements access to Mach-O object files. package macho // High level access to low level data structures. import ( "bytes" "debug/dwarf" "encoding/binary" "fmt" "io" "os" ) // A File represents an open Mach-O file. type File struct { FileHeader ByteOrder binary.ByteOrder Loads []Load Sections []*Section Symtab *Symtab Dysymtab *Dysymtab closer io.Closer } // A Load represents any Mach-O load command. type Load interface { Raw() []byte } // A LoadBytes is the uninterpreted bytes of a Mach-O load command. type LoadBytes []byte func (b LoadBytes) Raw() []byte { return b } // A SegmentHeader is the header for a Mach-O 32-bit or 64-bit load segment command. type SegmentHeader struct { Cmd LoadCmd Len uint32 Name string Addr uint64 Memsz uint64 Offset uint64 Filesz uint64 Maxprot uint32 Prot uint32 Nsect uint32 Flag uint32 } // A Segment represents a Mach-O 32-bit or 64-bit load segment command. type Segment struct { LoadBytes SegmentHeader // Embed ReaderAt for ReadAt method. // Do not embed SectionReader directly // to avoid having Read and Seek. // If a client wants Read and Seek it must use // Open() to avoid fighting over the seek offset // with other clients. io.ReaderAt sr *io.SectionReader } // Data reads and returns the contents of the segment. func (s *Segment) Data() ([]byte, error) { dat := make([]byte, s.sr.Size()) n, err := s.sr.ReadAt(dat, 0) if n == len(dat) { err = nil } return dat[0:n], err } // Open returns a new ReadSeeker reading the segment. func (s *Segment) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) } type SectionHeader struct { Name string Seg string Addr uint64 Size uint64 Offset uint32 Align uint32 Reloff uint32 Nreloc uint32 Flags uint32 } type Section struct { SectionHeader // Embed ReaderAt for ReadAt method. 
// Do not embed SectionReader directly // to avoid having Read and Seek. // If a client wants Read and Seek it must use // Open() to avoid fighting over the seek offset // with other clients. io.ReaderAt sr *io.SectionReader } // Data reads and returns the contents of the Mach-O section. func (s *Section) Data() ([]byte, error) { dat := make([]byte, s.sr.Size()) n, err := s.sr.ReadAt(dat, 0) if n == len(dat) { err = nil } return dat[0:n], err } // Open returns a new ReadSeeker reading the Mach-O section. func (s *Section) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) } // A Dylib represents a Mach-O load dynamic library command. type Dylib struct { LoadBytes Name string Time uint32 CurrentVersion uint32 CompatVersion uint32 } // A Symtab represents a Mach-O symbol table command. type Symtab struct { LoadBytes SymtabCmd Syms []Symbol } // A Dysymtab represents a Mach-O dynamic symbol table command. type Dysymtab struct { LoadBytes DysymtabCmd IndirectSyms []uint32 // indices into Symtab.Syms } /* * Mach-O reader */ // FormatError is returned by some operations if the data does // not have the correct format for an object file. type FormatError struct { off int64 msg string val interface{} } func (e *FormatError) Error() string { msg := e.msg if e.val != nil { msg += fmt.Sprintf(" '%v'", e.val) } msg += fmt.Sprintf(" in record at byte %#x", e.off) return msg } // Open opens the named file using os.Open and prepares it for use as a Mach-O binary. func Open(name string) (*File, error) { f, err := os.Open(name) if err != nil { return nil, err } ff, err := NewFile(f) if err != nil { f.Close() return nil, err } ff.closer = f return ff, nil } // Close closes the File. // If the File was created using NewFile directly instead of Open, // Close has no effect. 
func (f *File) Close() error { var err error if f.closer != nil { err = f.closer.Close() f.closer = nil } return err } // NewFile creates a new File for accessing a Mach-O binary in an underlying reader. // The Mach-O binary is expected to start at position 0 in the ReaderAt. func NewFile(r io.ReaderAt) (*File, error) { f := new(File) sr := io.NewSectionReader(r, 0, 1<<63-1) // Read and decode Mach magic to determine byte order, size. // Magic32 and Magic64 differ only in the bottom bit. var ident [4]byte if _, err := r.ReadAt(ident[0:], 0); err != nil { return nil, err } be := binary.BigEndian.Uint32(ident[0:]) le := binary.LittleEndian.Uint32(ident[0:]) switch Magic32 &^ 1 { case be &^ 1: f.ByteOrder = binary.BigEndian f.Magic = be case le &^ 1: f.ByteOrder = binary.LittleEndian f.Magic = le default: return nil, &FormatError{0, "invalid magic number", nil} } // Read entire file header. if err := binary.Read(sr, f.ByteOrder, &f.FileHeader); err != nil { return nil, err } // Then load commands. offset := int64(fileHeaderSize32) if f.Magic == Magic64 { offset = fileHeaderSize64 } dat := make([]byte, f.Cmdsz) if _, err := r.ReadAt(dat, offset); err != nil { return nil, err } f.Loads = make([]Load, f.Ncmd) bo := f.ByteOrder for i := range f.Loads { // Each load command begins with uint32 command and length. 
if len(dat) < 8 { return nil, &FormatError{offset, "command block too small", nil} } cmd, siz := LoadCmd(bo.Uint32(dat[0:4])), bo.Uint32(dat[4:8]) if siz < 8 || siz > uint32(len(dat)) { return nil, &FormatError{offset, "invalid command block size", nil} } var cmddat []byte cmddat, dat = dat[0:siz], dat[siz:] offset += int64(siz) var s *Segment switch cmd { default: f.Loads[i] = LoadBytes(cmddat) case LoadCmdDylib: var hdr DylibCmd b := bytes.NewReader(cmddat) if err := binary.Read(b, bo, &hdr); err != nil { return nil, err } l := new(Dylib) if hdr.Name >= uint32(len(cmddat)) { return nil, &FormatError{offset, "invalid name in dynamic library command", hdr.Name} } l.Name = cstring(cmddat[hdr.Name:]) l.Time = hdr.Time l.CurrentVersion = hdr.CurrentVersion l.CompatVersion = hdr.CompatVersion l.LoadBytes = LoadBytes(cmddat) f.Loads[i] = l case LoadCmdSymtab: var hdr SymtabCmd b := bytes.NewReader(cmddat) if err := binary.Read(b, bo, &hdr); err != nil { return nil, err } strtab := make([]byte, hdr.Strsize) if _, err := r.ReadAt(strtab, int64(hdr.Stroff)); err != nil { return nil, err } var symsz int if f.Magic == Magic64 { symsz = 16 } else { symsz = 12 } symdat := make([]byte, int(hdr.Nsyms)*symsz) if _, err := r.ReadAt(symdat, int64(hdr.Symoff)); err != nil { return nil, err } st, err := f.parseSymtab(symdat, strtab, cmddat, &hdr, offset) if err != nil { return nil, err } f.Loads[i] = st f.Symtab = st case LoadCmdDysymtab: var hdr DysymtabCmd b := bytes.NewReader(cmddat) if err := binary.Read(b, bo, &hdr); err != nil { return nil, err } dat := make([]byte, hdr.Nindirectsyms*4) if _, err := r.ReadAt(dat, int64(hdr.Indirectsymoff)); err != nil { return nil, err } x := make([]uint32, hdr.Nindirectsyms) if err := binary.Read(bytes.NewReader(dat), bo, x); err != nil { return nil, err } st := new(Dysymtab) st.LoadBytes = LoadBytes(cmddat) st.DysymtabCmd = hdr st.IndirectSyms = x f.Loads[i] = st f.Dysymtab = st case LoadCmdSegment: var seg32 Segment32 b := 
bytes.NewReader(cmddat) if err := binary.Read(b, bo, &seg32); err != nil { return nil, err } s = new(Segment) s.LoadBytes = cmddat s.Cmd = cmd s.Len = siz s.Name = cstring(seg32.Name[0:]) s.Addr = uint64(seg32.Addr) s.Memsz = uint64(seg32.Memsz) s.Offset = uint64(seg32.Offset) s.Filesz = uint64(seg32.Filesz) s.Maxprot = seg32.Maxprot s.Prot = seg32.Prot s.Nsect = seg32.Nsect s.Flag = seg32.Flag f.Loads[i] = s for i := 0; i < int(s.Nsect); i++ { var sh32 Section32 if err := binary.Read(b, bo, &sh32); err != nil { return nil, err } sh := new(Section) sh.Name = cstring(sh32.Name[0:]) sh.Seg = cstring(sh32.Seg[0:]) sh.Addr = uint64(sh32.Addr) sh.Size = uint64(sh32.Size) sh.Offset = sh32.Offset sh.Align = sh32.Align sh.Reloff = sh32.Reloff sh.Nreloc = sh32.Nreloc sh.Flags = sh32.Flags f.pushSection(sh, r) } case LoadCmdSegment64: var seg64 Segment64 b := bytes.NewReader(cmddat) if err := binary.Read(b, bo, &seg64); err != nil { return nil, err } s = new(Segment) s.LoadBytes = cmddat s.Cmd = cmd s.Len = siz s.Name = cstring(seg64.Name[0:]) s.Addr = seg64.Addr s.Memsz = seg64.Memsz s.Offset = seg64.Offset s.Filesz = seg64.Filesz s.Maxprot = seg64.Maxprot s.Prot = seg64.Prot s.Nsect = seg64.Nsect s.Flag = seg64.Flag f.Loads[i] = s for i := 0; i < int(s.Nsect); i++ { var sh64 Section64 if err := binary.Read(b, bo, &sh64); err != nil { return nil, err } sh := new(Section) sh.Name = cstring(sh64.Name[0:]) sh.Seg = cstring(sh64.Seg[0:]) sh.Addr = sh64.Addr sh.Size = sh64.Size sh.Offset = sh64.Offset sh.Align = sh64.Align sh.Reloff = sh64.Reloff sh.Nreloc = sh64.Nreloc sh.Flags = sh64.Flags f.pushSection(sh, r) } } if s != nil { s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.Filesz)) s.ReaderAt = s.sr } } return f, nil } func (f *File) parseSymtab(symdat, strtab, cmddat []byte, hdr *SymtabCmd, offset int64) (*Symtab, error) { bo := f.ByteOrder symtab := make([]Symbol, hdr.Nsyms) b := bytes.NewReader(symdat) for i := range symtab { var n Nlist64 if f.Magic == Magic64 { 
if err := binary.Read(b, bo, &n); err != nil { return nil, err } } else { var n32 Nlist32 if err := binary.Read(b, bo, &n32); err != nil { return nil, err } n.Name = n32.Name n.Type = n32.Type n.Sect = n32.Sect n.Desc = n32.Desc n.Value = uint64(n32.Value) } sym := &symtab[i] if n.Name >= uint32(len(strtab)) { return nil, &FormatError{offset, "invalid name in symbol table", n.Name} } sym.Name = cstring(strtab[n.Name:]) sym.Type = n.Type sym.Sect = n.Sect sym.Desc = n.Desc sym.Value = n.Value } st := new(Symtab) st.LoadBytes = LoadBytes(cmddat) st.Syms = symtab return st, nil } func (f *File) pushSection(sh *Section, r io.ReaderAt) { f.Sections = append(f.Sections, sh) sh.sr = io.NewSectionReader(r, int64(sh.Offset), int64(sh.Size)) sh.ReaderAt = sh.sr } func cstring(b []byte) string { var i int for i = 0; i < len(b) && b[i] != 0; i++ { } return string(b[0:i]) } // Segment returns the first Segment with the given name, or nil if no such segment exists. func (f *File) Segment(name string) *Segment { for _, l := range f.Loads { if s, ok := l.(*Segment); ok && s.Name == name { return s } } return nil } // Section returns the first section with the given name, or nil if no such // section exists. func (f *File) Section(name string) *Section { for _, s := range f.Sections { if s.Name == name { return s } } return nil } // DWARF returns the DWARF debug information for the Mach-O file. func (f *File) DWARF() (*dwarf.Data, error) { // There are many other DWARF sections, but these // are the required ones, and the debug/dwarf package // does not use the others, so don't bother loading them. 
var names = [...]string{"abbrev", "info", "line", "str"} var dat [len(names)][]byte for i, name := range names { name = "__debug_" + name s := f.Section(name) if s == nil { continue } b, err := s.Data() if err != nil && uint64(len(b)) < s.Size { return nil, err } dat[i] = b } abbrev, info, line, str := dat[0], dat[1], dat[2], dat[3] return dwarf.New(abbrev, nil, nil, info, line, nil, nil, str) } // ImportedSymbols returns the names of all symbols // referred to by the binary f that are expected to be // satisfied by other libraries at dynamic load time. func (f *File) ImportedSymbols() ([]string, error) { if f.Dysymtab == nil || f.Symtab == nil { return nil, &FormatError{0, "missing symbol table", nil} } st := f.Symtab dt := f.Dysymtab var all []string for _, s := range st.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym] { all = append(all, s.Name) } return all, nil } // ImportedLibraries returns the paths of all libraries // referred to by the binary f that are expected to be // linked with the binary at dynamic link time. func (f *File) ImportedLibraries() ([]string, error) { var all []string for _, l := range f.Loads { if lib, ok := l.(*Dylib); ok { all = append(all, lib.Name) } } return all, nil }
xinchoubiology/gcc
libgo/go/debug/macho/file.go
GO
gpl-2.0
12,819
/* Copyright (C) 2012-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config.h" /* This is a merge of code recommended in the autoconf-2.61 documentation with that recommended in the autoconf-2.13 documentation, with added tweaks to heed C_ALLOCA. */ #if defined HAVE_ALLOCA_H && !defined C_ALLOCA # include <alloca.h> #else # if defined __GNUC__ && !defined C_ALLOCA # if !defined alloca # define alloca __builtin_alloca # endif # else # if defined _AIX /* Indented so that pre-ansi C compilers will ignore it, rather than choke on it. Some versions of AIX require this to be the first thing seen by the compiler except for comments and preprocessor directives. */ #pragma alloca # else # if defined _MSC_VER && !defined C_ALLOCA # include <malloc.h> # define alloca _alloca # else # if !defined alloca # if defined __STDC__ || defined __hpux # if defined HAVE_STDDEF_H # include <stddef.h> # if defined __cplusplus extern "C" void *alloca (size_t); # else extern void *alloca (size_t); # endif # else extern void *alloca (); # endif # else extern char *alloca (); # endif # endif # endif # endif # endif #endif
selmentdev/selment-toolchain
source/binutils-latest/include/alloca-conf.h
C
gpl-3.0
1,860
MODULE = nrfmin SRC = nrfmin.c ifneq (,$(filter gnrc_netdev_default,$(USEMODULE))) SRC += nrfmin_gnrc.c endif include $(RIOTBASE)/Makefile.base
lazytech-org/RIOT
cpu/nrf5x_common/radio/nrfmin/Makefile
Makefile
lgpl-2.1
149
/*! UIkit 2.26.4 | http://www.getuikit.com | (c) 2014 YOOtheme | MIT License */ (function(UI) { "use strict"; var Animations; UI.component('switcher', { defaults: { connect : false, toggle : ">*", active : 0, animation : false, duration : 200, swiping : true }, animating: false, boot: function() { // init code UI.ready(function(context) { UI.$("[data-uk-switcher]", context).each(function() { var switcher = UI.$(this); if (!switcher.data("switcher")) { var obj = UI.switcher(switcher, UI.Utils.options(switcher.attr("data-uk-switcher"))); } }); }); }, init: function() { var $this = this; this.on("click.uk.switcher", this.options.toggle, function(e) { e.preventDefault(); $this.show(this); }); if (this.options.connect) { this.connect = UI.$(this.options.connect); this.connect.children().removeClass("uk-active"); // delegate switch commands within container content if (this.connect.length) { // Init ARIA for connect this.connect.children().attr('aria-hidden', 'true'); this.connect.on("click", '[data-uk-switcher-item]', function(e) { e.preventDefault(); var item = UI.$(this).attr('data-uk-switcher-item'); if ($this.index == item) return; switch(item) { case 'next': case 'previous': $this.show($this.index + (item=='next' ? 1:-1)); break; default: $this.show(parseInt(item, 10)); } }); if (this.options.swiping) { this.connect.on('swipeRight swipeLeft', function(e) { e.preventDefault(); if(!window.getSelection().toString()) { $this.show($this.index + (e.type == 'swipeLeft' ? 1 : -1)); } }); } } var toggles = this.find(this.options.toggle), active = toggles.filter(".uk-active"); if (active.length) { this.show(active, false); } else { if (this.options.active===false) return; active = toggles.eq(this.options.active); this.show(active.length ? 
active : toggles.eq(0), false); } // Init ARIA for toggles toggles.not(active).attr('aria-expanded', 'false'); active.attr('aria-expanded', 'true'); } }, show: function(tab, animate) { if (this.animating) { return; } if (isNaN(tab)) { tab = UI.$(tab); } else { var toggles = this.find(this.options.toggle); tab = tab < 0 ? toggles.length-1 : tab; tab = toggles.eq(toggles[tab] ? tab : 0); } var $this = this, toggles = this.find(this.options.toggle), active = UI.$(tab), animation = Animations[this.options.animation] || function(current, next) { if (!$this.options.animation) { return Animations.none.apply($this); } var anim = $this.options.animation.split(','); if (anim.length == 1) { anim[1] = anim[0]; } anim[0] = anim[0].trim(); anim[1] = anim[1].trim(); return coreAnimation.apply($this, [anim, current, next]); }; if (animate===false || !UI.support.animation) { animation = Animations.none; } if (active.hasClass("uk-disabled")) return; // Update ARIA for Toggles toggles.attr('aria-expanded', 'false'); active.attr('aria-expanded', 'true'); toggles.filter(".uk-active").removeClass("uk-active"); active.addClass("uk-active"); if (this.options.connect && this.connect.length) { this.index = this.find(this.options.toggle).index(active); if (this.index == -1 ) { this.index = 0; } this.connect.each(function() { var container = UI.$(this), children = UI.$(container.children()), current = UI.$(children.filter('.uk-active')), next = UI.$(children.eq($this.index)); $this.animating = true; animation.apply($this, [current, next]).then(function(){ current.removeClass("uk-active"); next.addClass("uk-active"); // Update ARIA for connect current.attr('aria-hidden', 'true'); next.attr('aria-hidden', 'false'); UI.Utils.checkDisplay(next, true); $this.animating = false; }); }); } this.trigger("show.uk.switcher", [active]); } }); Animations = { 'none': function() { var d = UI.$.Deferred(); d.resolve(); return d.promise(); }, 'fade': function(current, next) { return coreAnimation.apply(this, 
['uk-animation-fade', current, next]); }, 'slide-bottom': function(current, next) { return coreAnimation.apply(this, ['uk-animation-slide-bottom', current, next]); }, 'slide-top': function(current, next) { return coreAnimation.apply(this, ['uk-animation-slide-top', current, next]); }, 'slide-vertical': function(current, next, dir) { var anim = ['uk-animation-slide-top', 'uk-animation-slide-bottom']; if (current && current.index() > next.index()) { anim.reverse(); } return coreAnimation.apply(this, [anim, current, next]); }, 'slide-left': function(current, next) { return coreAnimation.apply(this, ['uk-animation-slide-left', current, next]); }, 'slide-right': function(current, next) { return coreAnimation.apply(this, ['uk-animation-slide-right', current, next]); }, 'slide-horizontal': function(current, next, dir) { var anim = ['uk-animation-slide-right', 'uk-animation-slide-left']; if (current && current.index() > next.index()) { anim.reverse(); } return coreAnimation.apply(this, [anim, current, next]); }, 'scale': function(current, next) { return coreAnimation.apply(this, ['uk-animation-scale-up', current, next]); } }; UI.switcher.animations = Animations; // helpers function coreAnimation(cls, current, next) { var d = UI.$.Deferred(), clsIn = cls, clsOut = cls, release; if (next[0]===current[0]) { d.resolve(); return d.promise(); } if (typeof(cls) == 'object') { clsIn = cls[0]; clsOut = cls[1] || cls[0]; } UI.$body.css('overflow-x', 'hidden'); // fix scroll jumping in iOS release = function() { if (current) current.hide().removeClass('uk-active '+clsOut+' uk-animation-reverse'); next.addClass(clsIn).one(UI.support.animation.end, function() { setTimeout(function () { next.removeClass(''+clsIn+'').css({opacity:'', display:''}); }, 0); d.resolve(); UI.$body.css('overflow-x', ''); if (current) current.css({opacity:'', display:''}); }.bind(this)).show(); }; next.css('animation-duration', this.options.duration+'ms'); if (current && current.length) { 
current.css('animation-duration', this.options.duration+'ms'); current.css('display', 'none').addClass(clsOut+' uk-animation-reverse').one(UI.support.animation.end, function() { release(); }.bind(this)).css('display', ''); } else { next.addClass('uk-active'); release(); } return d.promise(); } })(UIkit);
YutingPang/webapp
www/static/js/core/switcher.js
JavaScript
apache-2.0
9,248
#ifndef __PERF_DATA_H #define __PERF_DATA_H #include <stdbool.h> enum perf_data_mode { PERF_DATA_MODE_WRITE, PERF_DATA_MODE_READ, }; struct perf_data_file { const char *path; int fd; bool is_pipe; bool force; unsigned long size; enum perf_data_mode mode; }; static inline bool perf_data_file__is_read(struct perf_data_file *file) { return file->mode == PERF_DATA_MODE_READ; } static inline bool perf_data_file__is_write(struct perf_data_file *file) { return file->mode == PERF_DATA_MODE_WRITE; } static inline int perf_data_file__is_pipe(struct perf_data_file *file) { return file->is_pipe; } static inline int perf_data_file__fd(struct perf_data_file *file) { return file->fd; } static inline unsigned long perf_data_file__size(struct perf_data_file *file) { return file->size; } int perf_data_file__open(struct perf_data_file *file); void perf_data_file__close(struct perf_data_file *file); ssize_t perf_data_file__write(struct perf_data_file *file, void *buf, size_t size); /* * If at_exit is set, only rename current perf.data to * perf.data.<postfix>, continue write on original file. * Set at_exit when flushing the last output. * * Return value is fd of new output. */ int perf_data_file__switch(struct perf_data_file *file, const char *postfix, size_t pos, bool at_exit); #endif /* __PERF_DATA_H */
jumpnow/linux
tools/perf/util/data.h
C
gpl-2.0
1,369
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Shared descriptors for ahash algorithms * * Copyright 2017-2019 NXP */ #include "compat.h" #include "desc_constr.h" #include "caamhash_desc.h" /** * cnstr_shdsc_ahash - ahash shared descriptor * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, * SHA256, SHA384, SHA512}. * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} * @digestsize: algorithm's digest size * @ctx_len: size of Context Register * @import_ctx: true if previous Context Register needs to be restored * must be true for ahash update and final * must be false for for ahash first and digest * @era: SEC Era */ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, bool import_ctx, int era) { u32 op = adata->algtype; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Append key if it has been set; ahash update excluded */ if (state != OP_ALG_AS_UPDATE && adata->keylen) { u32 *skip_key_load; /* Skip key loading if already shared */ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (era < 6) append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); else append_proto_dkp(desc, adata); set_jump_tgt_here(desc, skip_key_load); op |= OP_ALG_AAI_HMAC_PRECOMP; } /* If needed, import context from software */ if (import_ctx) append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); /* Class 2 operation */ append_operation(desc, op | state | OP_ALG_ENCRYPT); /* * Load from buf and/or src and write to req->result or state->context * Calculate remaining bytes to read */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Read remaining bytes */ 
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG | KEY_VLF); /* Store class2 context bytes */ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); } EXPORT_SYMBOL(cnstr_shdsc_ahash); /** * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based * hash algorithms * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} * @digestsize: algorithm's digest size * @ctx_len: size of Context Register * @key_dma: I/O Virtual Address of the key */ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, dma_addr_t key_dma) { u32 *skip_key_load; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip loading of key, context if already shared */ skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) { append_key_as_imm(desc, adata->key_virt, adata->keylen, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); } else { /* UPDATE, FINALIZE */ if (is_xcbc_aes(adata->algtype)) /* Load K1 */ append_key(desc, adata->key_dma, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC); else /* CMAC */ append_key_as_imm(desc, adata->key_virt, adata->keylen, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Restore context */ append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); } set_jump_tgt_here(desc, skip_key_load); /* Class 1 operation */ append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT); /* * Load from buf and/or src and write to req->result or state->context * Calculate remaining bytes to read */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Read remaining bytes */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 | FIFOLD_TYPE_MSG | FIFOLDST_VLF); /* * Save context: * - xcbc: 
partial hash, keys K2 and K3 * - cmac: partial hash, constant L = E(K,0) */ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) /* Save K1 */ append_fifo_store(desc, key_dma, adata->keylen, LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); } EXPORT_SYMBOL(cnstr_shdsc_sk_hash); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); MODULE_AUTHOR("NXP Semiconductors");
koct9i/linux
drivers/crypto/caam/caamhash_desc.c
C
gpl-2.0
4,815
/****************************************************************************** * * Module Name: exmutex - ASL Mutex Acquire/Release functions * *****************************************************************************/ /* * Copyright (C) 2000 - 2010, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "acevents.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exmutex") /* Local prototypes */ static void acpi_ex_link_mutex(union acpi_operand_object *obj_desc, struct acpi_thread_state *thread); /******************************************************************************* * * FUNCTION: acpi_ex_unlink_mutex * * PARAMETERS: obj_desc - The mutex to be unlinked * * RETURN: None * * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list * ******************************************************************************/ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc) { struct acpi_thread_state *thread = obj_desc->mutex.owner_thread; if (!thread) { return; } /* Doubly linked list */ if (obj_desc->mutex.next) { (obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev; } if (obj_desc->mutex.prev) { (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next; /* * Migrate the previous sync level associated with this mutex to * the previous mutex on the list so that it may be preserved. * This handles the case where several mutexes have been acquired * at the same level, but are not released in opposite order. 
*/ (obj_desc->mutex.prev)->mutex.original_sync_level = obj_desc->mutex.original_sync_level; } else { thread->acquired_mutex_list = obj_desc->mutex.next; } } /******************************************************************************* * * FUNCTION: acpi_ex_link_mutex * * PARAMETERS: obj_desc - The mutex to be linked * Thread - Current executing thread object * * RETURN: None * * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk * ******************************************************************************/ static void acpi_ex_link_mutex(union acpi_operand_object *obj_desc, struct acpi_thread_state *thread) { union acpi_operand_object *list_head; list_head = thread->acquired_mutex_list; /* This object will be the first object in the list */ obj_desc->mutex.prev = NULL; obj_desc->mutex.next = list_head; /* Update old first object to point back to this object */ if (list_head) { list_head->mutex.prev = obj_desc; } /* Update list head */ thread->acquired_mutex_list = obj_desc; } /******************************************************************************* * * FUNCTION: acpi_ex_acquire_mutex_object * * PARAMETERS: Timeout - Timeout in milliseconds * obj_desc - Mutex object * thread_id - Current thread state * * RETURN: Status * * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common * path that supports multiple acquires by the same thread. 
* * MUTEX: Interpreter must be locked * * NOTE: This interface is called from three places: * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the * global lock * 3) From the external interface, acpi_acquire_global_lock * ******************************************************************************/ acpi_status acpi_ex_acquire_mutex_object(u16 timeout, union acpi_operand_object *obj_desc, acpi_thread_id thread_id) { acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc); if (!obj_desc) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Support for multiple acquires by the owning thread */ if (obj_desc->mutex.thread_id == thread_id) { /* * The mutex is already owned by this thread, just increment the * acquisition depth */ obj_desc->mutex.acquisition_depth++; return_ACPI_STATUS(AE_OK); } /* Acquire the mutex, wait if necessary. Special case for Global Lock */ if (obj_desc == acpi_gbl_global_lock_mutex) { status = acpi_ev_acquire_global_lock(timeout); } else { status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, timeout); } if (ACPI_FAILURE(status)) { /* Includes failure from a timeout on time_desc */ return_ACPI_STATUS(status); } /* Acquired the mutex: update mutex object */ obj_desc->mutex.thread_id = thread_id; obj_desc->mutex.acquisition_depth = 1; obj_desc->mutex.original_sync_level = 0; obj_desc->mutex.owner_thread = NULL; /* Used only for AML Acquire() */ return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_acquire_mutex * * PARAMETERS: time_desc - Timeout integer * obj_desc - Mutex object * walk_state - Current method execution state * * RETURN: Status * * DESCRIPTION: Acquire an AML mutex * ******************************************************************************/ acpi_status acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, union acpi_operand_object 
*obj_desc, struct acpi_walk_state *walk_state) { acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc); if (!obj_desc) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Must have a valid thread state struct */ if (!walk_state->thread) { ACPI_ERROR((AE_INFO, "Cannot acquire Mutex [%4.4s], null thread info", acpi_ut_get_node_name(obj_desc->mutex.node))); return_ACPI_STATUS(AE_AML_INTERNAL); } /* * Current sync level must be less than or equal to the sync level of the * mutex. This mechanism provides some deadlock prevention */ if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { ACPI_ERROR((AE_INFO, "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)", acpi_ut_get_node_name(obj_desc->mutex.node), walk_state->thread->current_sync_level)); return_ACPI_STATUS(AE_AML_MUTEX_ORDER); } status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value, obj_desc, walk_state->thread->thread_id); if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) { /* Save Thread object, original/current sync levels */ obj_desc->mutex.owner_thread = walk_state->thread; obj_desc->mutex.original_sync_level = walk_state->thread->current_sync_level; walk_state->thread->current_sync_level = obj_desc->mutex.sync_level; /* Link the mutex to the current thread for force-unlock at method exit */ acpi_ex_link_mutex(obj_desc, walk_state->thread); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_release_mutex_object * * PARAMETERS: obj_desc - The object descriptor for this op * * RETURN: Status * * DESCRIPTION: Release a previously acquired Mutex, low level interface. * Provides a common path that supports multiple releases (after * previous multiple acquires) by the same thread. 
* * MUTEX: Interpreter must be locked * * NOTE: This interface is called from three places: * 1) From acpi_ex_release_mutex, via an AML Acquire() operator * 2) From acpi_ex_release_global_lock when an AML Field access requires the * global lock * 3) From the external interface, acpi_release_global_lock * ******************************************************************************/ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ex_release_mutex_object); if (obj_desc->mutex.acquisition_depth == 0) { return (AE_NOT_ACQUIRED); } /* Match multiple Acquires with multiple Releases */ obj_desc->mutex.acquisition_depth--; if (obj_desc->mutex.acquisition_depth != 0) { /* Just decrement the depth and return */ return_ACPI_STATUS(AE_OK); } if (obj_desc->mutex.owner_thread) { /* Unlink the mutex from the owner's list */ acpi_ex_unlink_mutex(obj_desc); obj_desc->mutex.owner_thread = NULL; } /* Release the mutex, special case for Global Lock */ if (obj_desc == acpi_gbl_global_lock_mutex) { status = acpi_ev_release_global_lock(); } else { acpi_os_release_mutex(obj_desc->mutex.os_mutex); } /* Clear mutex info */ obj_desc->mutex.thread_id = 0; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_release_mutex * * PARAMETERS: obj_desc - The object descriptor for this op * walk_state - Current method execution state * * RETURN: Status * * DESCRIPTION: Release a previously acquired Mutex. 
* ******************************************************************************/ acpi_status acpi_ex_release_mutex(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; u8 previous_sync_level; struct acpi_thread_state *owner_thread; ACPI_FUNCTION_TRACE(ex_release_mutex); if (!obj_desc) { return_ACPI_STATUS(AE_BAD_PARAMETER); } owner_thread = obj_desc->mutex.owner_thread; /* The mutex must have been previously acquired in order to release it */ if (!owner_thread) { ACPI_ERROR((AE_INFO, "Cannot release Mutex [%4.4s], not acquired", acpi_ut_get_node_name(obj_desc->mutex.node))); return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); } /* Must have a valid thread. */ if (!walk_state->thread) { ACPI_ERROR((AE_INFO, "Cannot release Mutex [%4.4s], null thread info", acpi_ut_get_node_name(obj_desc->mutex.node))); return_ACPI_STATUS(AE_AML_INTERNAL); } /* * The Mutex is owned, but this thread must be the owner. * Special case for Global Lock, any thread can release */ if ((owner_thread->thread_id != walk_state->thread->thread_id) && (obj_desc != acpi_gbl_global_lock_mutex)) { ACPI_ERROR((AE_INFO, "Thread %u cannot release Mutex [%4.4s] acquired by thread %u", (u32)walk_state->thread->thread_id, acpi_ut_get_node_name(obj_desc->mutex.node), (u32)owner_thread->thread_id)); return_ACPI_STATUS(AE_AML_NOT_OWNER); } /* * The sync level of the mutex must be equal to the current sync level. In * other words, the current level means that at least one mutex at that * level is currently being held. Attempting to release a mutex of a * different level can only mean that the mutex ordering rule is being * violated. This behavior is clarified in ACPI 4.0 specification. 
*/ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) { ACPI_ERROR((AE_INFO, "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u", acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.sync_level, walk_state->thread->current_sync_level)); return_ACPI_STATUS(AE_AML_MUTEX_ORDER); } /* * Get the previous sync_level from the head of the acquired mutex list. * This handles the case where several mutexes at the same level have been * acquired, but are not released in reverse order. */ previous_sync_level = owner_thread->acquired_mutex_list->mutex.original_sync_level; status = acpi_ex_release_mutex_object(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (obj_desc->mutex.acquisition_depth == 0) { /* Restore the previous sync_level */ owner_thread->current_sync_level = previous_sync_level; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_release_all_mutexes * * PARAMETERS: Thread - Current executing thread object * * RETURN: Status * * DESCRIPTION: Release all mutexes held by this thread * * NOTE: This function is called as the thread is exiting the interpreter. * Mutexes are not released when an individual control method is exited, but * only when the parent thread actually exits the interpreter. This allows one * method to acquire a mutex, and a different method to release it, as long as * this is performed underneath a single parent control method. 
* ******************************************************************************/ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) { union acpi_operand_object *next = thread->acquired_mutex_list; union acpi_operand_object *obj_desc; ACPI_FUNCTION_ENTRY(); /* Traverse the list of owned mutexes, releasing each one */ while (next) { obj_desc = next; next = obj_desc->mutex.next; obj_desc->mutex.prev = NULL; obj_desc->mutex.next = NULL; obj_desc->mutex.acquisition_depth = 0; /* Release the mutex, special case for Global Lock */ if (obj_desc == acpi_gbl_global_lock_mutex) { /* Ignore errors */ (void)acpi_ev_release_global_lock(); } else { acpi_os_release_mutex(obj_desc->mutex.os_mutex); } /* Mark mutex unowned */ obj_desc->mutex.owner_thread = NULL; obj_desc->mutex.thread_id = 0; /* Update Thread sync_level (Last mutex is the important one) */ thread->current_sync_level = obj_desc->mutex.original_sync_level; } }
nazgee/igep-kernel
drivers/acpi/acpica/exmutex.c
C
gpl-2.0
15,331
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>RSpec results</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta http-equiv="Expires" content="-1" /> <meta http-equiv="Pragma" content="no-cache" /> <style type="text/css"> body { margin: 0; padding: 0; background: #fff; font-size: 80%; } </style> </head> <body> <div class="rspec-report"> <script type="text/javascript"> // <![CDATA[ function moveProgressBar(percentDone) { document.getElementById("rspec-header").style.width = percentDone +"%"; } function makeRed(element_id) { document.getElementById(element_id).style.background = '#C40D0D'; document.getElementById(element_id).style.color = '#FFFFFF'; } function makeYellow(element_id) { if (element_id == "rspec-header" && document.getElementById(element_id).style.background != '#C40D0D') { document.getElementById(element_id).style.background = '#FAF834'; document.getElementById(element_id).style.color = '#000000'; } else { document.getElementById(element_id).style.background = '#FAF834'; document.getElementById(element_id).style.color = '#000000'; } } // ]]> </script> <style type="text/css"> #rspec-header { background: #65C400; color: #fff; } .rspec-report h1 { margin: 0px 10px 0px 10px; padding: 10px; font-family: "Lucida Grande", Helvetica, sans-serif; font-size: 1.8em; } #summary { margin: 0; padding: 5px 10px; font-family: "Lucida Grande", Helvetica, sans-serif; text-align: right; position: absolute; top: 0px; right: 0px; } #summary p { margin: 0 0 0 2px; } #summary #totals { font-size: 1.2em; } .example_group { margin: 0 10px 5px; background: #fff; } dl { margin: 0; padding: 0 0 5px; font: normal 11px "Lucida Grande", Helvetica, sans-serif; } dt { padding: 3px; background: #65C400; color: #fff; font-weight: bold; } dd { margin: 5px 0 5px 5px; padding: 
3px 3px 3px 18px; } dd.spec.passed { border-left: 5px solid #65C400; border-bottom: 1px solid #65C400; background: #DBFFB4; color: #3D7700; } dd.spec.failed { border-left: 5px solid #C20000; border-bottom: 1px solid #C20000; color: #C20000; background: #FFFBD3; } dd.spec.not_implemented { border-left: 5px solid #FAF834; border-bottom: 1px solid #FAF834; background: #FCFB98; color: #131313; } dd.spec.pending_fixed { border-left: 5px solid #0000C2; border-bottom: 1px solid #0000C2; color: #0000C2; background: #D3FBFF; } .backtrace { color: #000; font-size: 12px; } a { color: #BE5C00; } /* Ruby code, style similar to vibrant ink */ .ruby { font-size: 12px; font-family: monospace; color: white; background-color: black; padding: 0.1em 0 0.2em 0; } .ruby .keyword { color: #FF6600; } .ruby .constant { color: #339999; } .ruby .attribute { color: white; } .ruby .global { color: white; } .ruby .module { color: white; } .ruby .class { color: white; } .ruby .string { color: #66FF00; } .ruby .ident { color: white; } .ruby .method { color: #FFCC00; } .ruby .number { color: white; } .ruby .char { color: white; } .ruby .comment { color: #9933CC; } .ruby .symbol { color: white; } .ruby .regex { color: #44B4CC; } .ruby .punct { color: white; } .ruby .escape { color: white; } .ruby .interp { color: white; } .ruby .expr { color: white; } .ruby .offending { background-color: gray; } .ruby .linenum { width: 75px; padding: 0.1em 1em 0.2em 0; color: #000000; background-color: #FFFBD3; } </style> <div id="rspec-header"> <h1>RSpec Results</h1> <div id="summary"> <p id="totals">&nbsp;</p> <p id="duration">&nbsp;</p> </div> </div> <div class="results"> <div class="example_group"> <dl> <dt id="example_group_1">Mocker</dt> <script type="text/javascript">moveProgressBar('5.8');</script> <dd class="spec passed"><span class="passed_spec_name">should be able to call mock()</span></dd> <script type="text/javascript">makeRed('rspec-header');</script> <script 
type="text/javascript">makeRed('example_group_1');</script> <script type="text/javascript">moveProgressBar('11.7');</script> <dd class="spec failed"> <span class="failed_spec_name">should fail when expected message not received</span> <div class="failure" id="failure_1"> <div class="message"><pre>Mock 'poke me' expected :poke with (any args) once, but received it 0 times</pre></div> <div class="backtrace"><pre>./failing_examples/mocking_example.rb:13: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">11</span> <span class="ident">it</span> <span class="punct">&quot;</span><span class="string">should fail when expected message not received</span><span class="punct">&quot;</span> <span class="keyword">do</span> <span class="linenum">12</span> <span class="ident">mock</span> <span class="punct">=</span> <span class="ident">mock</span><span class="punct">(&quot;</span><span class="string">poke me</span><span class="punct">&quot;)</span> <span class="offending"><span class="linenum">13</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">should_receive</span><span class="punct">(</span><span class="symbol">:poke</span><span class="punct">)</span></span> <span class="linenum">14</span> <span class="keyword">end</span> <span class="linenum">15</span> </code></pre> </div> </dd> <script type="text/javascript">moveProgressBar('17.6');</script> <dd class="spec failed"> <span class="failed_spec_name">should fail when messages are received out of order</span> <div class="failure" id="failure_2"> <div class="message"><pre>Mock 'one two three' received :three out of order</pre></div> <div class="backtrace"><pre>./failing_examples/mocking_example.rb:22: 
/Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">20</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">should_receive</span><span class="punct">(</span><span class="symbol">:three</span><span class="punct">).</span><span class="ident">ordered</span> <span class="linenum">21</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">one</span> <span class="offending"><span class="linenum">22</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">three</span></span> <span class="linenum">23</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">two</span> <span class="linenum">24</span> <span class="keyword">end</span></code></pre> </div> </dd> <script type="text/javascript">moveProgressBar('23.5');</script> <dd class="spec failed"> <span class="failed_spec_name">should get yelled at when sending unexpected messages</span> <div class="failure" id="failure_3"> <div class="message"><pre>Mock 'don't talk to me' expected :any_message_at_all with (no args) 0 times, but received it once</pre></div> <div class="backtrace"><pre>./failing_examples/mocking_example.rb:29: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">27</span> <span class="ident">mock</span> <span class="punct">=</span> <span class="ident">mock</span><span class="punct">(&quot;</span><span 
class="string">don't talk to me</span><span class="punct">&quot;)</span> <span class="linenum">28</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">should_not_receive</span><span class="punct">(</span><span class="symbol">:any_message_at_all</span><span class="punct">)</span> <span class="offending"><span class="linenum">29</span> <span class="ident">mock</span><span class="punct">.</span><span class="ident">any_message_at_all</span></span> <span class="linenum">30</span> <span class="keyword">end</span></code></pre> </div> </dd> <script type="text/javascript">moveProgressBar('29.4');</script> <dd class="spec pending_fixed"> <span class="failed_spec_name">has a bug we need to fix</span> <div class="failure" id="failure_4"> <div class="message"><pre>Expected pending 'here is the bug' to fail. No Error was raised.</pre></div> <div class="backtrace"><pre>./failing_examples/mocking_example.rb:33: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">31</span> <span class="linenum">32</span> <span class="ident">it</span> <span class="punct">&quot;</span><span class="string">has a bug we need to fix</span><span class="punct">&quot;</span> <span class="keyword">do</span> <span class="offending"><span class="linenum">33</span> <span class="ident">pending</span> <span class="punct">&quot;</span><span class="string">here is the bug</span><span class="punct">&quot;</span> <span class="keyword">do</span></span> <span class="linenum">34</span> <span class="comment"># Actually, no. It's fixed. 
This will fail because it passes :-)</span> <span class="linenum">35</span> <span class="ident">mock</span> <span class="punct">=</span> <span class="ident">mock</span><span class="punct">(&quot;</span><span class="string">Bug</span><span class="punct">&quot;)</span></code></pre> </div> </dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_2">Running specs with --diff</dt> <script type="text/javascript">makeRed('example_group_2');</script> <script type="text/javascript">moveProgressBar('35.2');</script> <dd class="spec failed"> <span class="failed_spec_name">should print diff of different strings</span> <div class="failure" id="failure_5"> <div class="message"><pre>expected: &quot;RSpec is a\nbehaviour driven development\nframework for Ruby\n&quot;, got: &quot;RSpec is a\nbehavior driven development\nframework for Ruby\n&quot; (using ==) Diff: @@ -1,4 +1,4 @@ RSpec is a -behaviour driven development +behavior driven development framework for Ruby </pre></div> <div class="backtrace"><pre>./failing_examples/diffing_spec.rb:13: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">11</span><span class="ident">framework</span> <span class="keyword">for</span> <span class="constant">Ruby</span> <span class="linenum">12</span><span class="constant">EOF</span> <span class="offending"><span class="linenum">13</span> <span class="ident">usa</span><span class="punct">.</span><span class="ident">should</span> <span class="punct">==</span> <span class="ident">uk</span></span> <span class="linenum">14</span> <span class="keyword">end</span></code></pre> </div> </dd> <script type="text/javascript">moveProgressBar('41.1');</script> <dd class="spec failed"> <span 
class="failed_spec_name">should print diff of different objects' pretty representation</span> <div class="failure" id="failure_6"> <div class="message"><pre>expected &lt;Animal name=bob, species=tortoise &gt; , got &lt;Animal name=bob, species=giraffe &gt; (using .eql?) Diff: @@ -1,5 +1,5 @@ &lt;Animal name=bob, -species=tortoise +species=giraffe &gt; </pre></div> <div class="backtrace"><pre>./failing_examples/diffing_spec.rb:34: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:25: /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:in `chdir' /Users/david/projects/ruby/rspec/rspec/spec/spec/runner/formatter/html_formatter_spec.rb:21:</pre></div> <pre class="ruby"><code><span class="linenum">32</span> <span class="ident">expected</span> <span class="punct">=</span> <span class="constant">Animal</span><span class="punct">.</span><span class="ident">new</span> <span class="punct">&quot;</span><span class="string">bob</span><span class="punct">&quot;,</span> <span class="punct">&quot;</span><span class="string">giraffe</span><span class="punct">&quot;</span> <span class="linenum">33</span> <span class="ident">actual</span> <span class="punct">=</span> <span class="constant">Animal</span><span class="punct">.</span><span class="ident">new</span> <span class="punct">&quot;</span><span class="string">bob</span><span class="punct">&quot;,</span> <span class="punct">&quot;</span><span class="string">tortoise</span><span class="punct">&quot;</span> <span class="offending"><span class="linenum">34</span> <span class="ident">expected</span><span class="punct">.</span><span class="ident">should</span> <span class="ident">eql</span><span class="punct">(</span><span class="ident">actual</span><span class="punct">)</span></span> <span class="linenum">35</span> <span class="keyword">end</span> <span class="linenum">36</span><span class="keyword">end</span></code></pre> </div> </dd> </dl> </div> <div 
class="example_group"> <dl> <dt id="example_group_3">A consumer of a stub</dt> <script type="text/javascript">moveProgressBar('47.0');</script> <dd class="spec passed"><span class="passed_spec_name">should be able to stub methods on any Object</span></dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_4">A stubbed method on a class</dt> <script type="text/javascript">moveProgressBar('52.9');</script> <dd class="spec passed"><span class="passed_spec_name">should return the stubbed value</span></dd> <script type="text/javascript">moveProgressBar('58.8');</script> <dd class="spec passed"><span class="passed_spec_name">should revert to the original method after each spec</span></dd> <script type="text/javascript">moveProgressBar('64.7');</script> <dd class="spec passed"><span class="passed_spec_name">can stub! and mock the same message</span></dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_5">A mock</dt> <script type="text/javascript">moveProgressBar('70.5');</script> <dd class="spec passed"><span class="passed_spec_name">can stub!</span></dd> <script type="text/javascript">moveProgressBar('76.4');</script> <dd class="spec passed"><span class="passed_spec_name">can stub! and mock</span></dd> <script type="text/javascript">moveProgressBar('82.3');</script> <dd class="spec passed"><span class="passed_spec_name">can stub! 
and mock the same message</span></dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_6">pending example (using pending method)</dt> <script type="text/javascript">makeYellow('example_group_6');</script> <script type="text/javascript">moveProgressBar('88.2');</script> <dd class="spec not_implemented"><span class="not_implemented_spec_name">should be reported as &quot;PENDING: for some reason&quot; (PENDING: for some reason)</span></dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_7">pending example (with no block)</dt> <script type="text/javascript">makeYellow('example_group_7');</script> <script type="text/javascript">moveProgressBar('94.1');</script> <dd class="spec not_implemented"><span class="not_implemented_spec_name">should be reported as &quot;PENDING: Not Yet Implemented&quot; (PENDING: Not Yet Implemented)</span></dd> </dl> </div> <div class="example_group"> <dl> <dt id="example_group_8">pending example (with block for pending)</dt> <script type="text/javascript">makeYellow('example_group_8');</script> <script type="text/javascript">moveProgressBar('100.0');</script> <dd class="spec not_implemented"><span class="not_implemented_spec_name">should have a failing block, passed to pending, reported as &quot;PENDING: for some reason&quot; (PENDING: for some reason)</span></dd> </dl> </div> <script type="text/javascript">document.getElementById('duration').innerHTML = "Finished in <strong>x seconds</strong>";</script> <script type="text/javascript">document.getElementById('totals').innerHTML = "17 examples, 6 failures, 3 pending";</script> </div> </div> </body> </html>
tapn2it/attic-antiques
vendor/spree/vendor/plugins/rspec/spec/spec/runner/formatter/html_formatted-1.8.6.html
HTML
bsd-3-clause
17,536
/** * @license * Copyright 2016 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import MDCFoundation from 'material__base/foundation'; import MDCCheckboxAdapter from './adapter'; import { cssClasses, strings, numbers } from './constants'; export default class MDCCheckboxFoundation extends MDCFoundation<MDCCheckboxAdapter> { static readonly cssClasses: cssClasses; static readonly strings: strings; static readonly numbers: numbers; static readonly defaultAdapter: MDCCheckboxAdapter; isChecked(): boolean; setChecked(checked: boolean): void; isIndeterminate(): boolean; setIndeterminate(indeterminate: boolean): void; isDisabled(): boolean; setDisabled(disabled: boolean): void; getValue(): string; setValue(value: string): void; handleAnimationEnd(): void; handleChange(): void; }
borisyankov/DefinitelyTyped
types/material__checkbox/foundation.d.ts
TypeScript
mit
1,405
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef __AMDGPU_ATOMFIRMWARE_H__ #define __AMDGPU_ATOMFIRMWARE_H__ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); #endif
mkvdv/au-linux-kernel-autumn-2017
linux/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
C
gpl-3.0
1,687
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import date, datetime from dateutil import relativedelta import json import time from openerp.osv import fields, osv from openerp.tools.float_utils import float_compare, float_round from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT from openerp.exceptions import Warning from openerp import SUPERUSER_ID, api import openerp.addons.decimal_precision as dp from openerp.addons.procurement import procurement import logging _logger = logging.getLogger(__name__) #---------------------------------------------------------- # Incoterms #---------------------------------------------------------- class stock_incoterms(osv.osv): _name = "stock.incoterms" _description = "Incoterms" _columns = { 'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. 
They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."), 'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."), } _defaults = { 'active': True, } #---------------------------------------------------------- # Stock Location #---------------------------------------------------------- class stock_location(osv.osv): _name = "stock.location" _description = "Inventory Locations" _parent_name = "location_id" _parent_store = True _parent_order = 'name' _order = 'parent_left' _rec_name = 'complete_name' def _location_owner(self, cr, uid, location, context=None): ''' Return the company owning the location if any ''' return location and (location.usage == 'internal') and location.company_id or False def _complete_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = m.name parent = m.location_id while parent: res[m.id] = parent.name + ' / ' + res[m.id] parent = parent.location_id return res def _get_sublocations(self, cr, uid, ids, context=None): """ return all sublocations of the given stock locations (included) """ if context is None: context = {} context_with_inactive = context.copy() context_with_inactive['active_test'] = False return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive) def _name_get(self, cr, uid, location, context=None): name = location.name while location.location_id and location.usage != 'view': location = location.location_id name = location.name + '/' + name return name def name_get(self, cr, uid, ids, context=None): res = [] for location in self.browse(cr, uid, ids, context=context): res.append((location.id, self._name_get(cr, uid, location, context=context))) return res _columns = { 'name': fields.char('Location Name', required=True, translate=True), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."), 'usage': fields.selection([ ('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location')], 'Location Type', required=True, help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products \n* Internal Location: Physical locations inside your own warehouses, \n* Customer Location: Virtual location representing the destination location for products sent to your customers \n* Inventory: Virtual location serving as 
counterpart for inventory operations used to correct stock levels (Physical inventories) \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running. \n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products \n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations """, select=True),
        # Fully qualified "parent / child" name; recomputed (via _get_sublocations)
        # whenever a location's name, parent, or active flag changes.
        'complete_name': fields.function(_complete_name, type='char', string="Location Name",
                                         store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
        'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
        'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
        'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
        'comment': fields.text('Additional Information'),
        # Physical coordinates of the location; informational only.
        'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
        'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
        'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
        # Nested-set values maintained by the ORM; used for fast child_of searches.
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
        'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
        'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
        'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
        'loc_barcode': fields.char('Location Barcode'),
    }

    _defaults = {
        'active': True,
        'usage': 'internal',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
        'posx': 0,
        'posy': 0,
        'posz': 0,
        'scrap_location': False,
    }

    # A barcode may be reused across companies but must be unique within one.
    _sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')]

    def create(self, cr, uid, default, context=None):
        # Default the barcode to the computed complete name when none is given.
        if not default.get('loc_barcode', False):
            default.update({'loc_barcode': default.get('complete_name', False)})
        return super(stock_location, self).create(cr, uid, default, context=context)

    def get_putaway_strategy(self, cr, uid, location, product, context=None):
        ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
        putaway_obj = self.pool.get('product.putaway')
        loc = location
        # Walk up the location hierarchy; the first strategy that yields a
        # destination wins. Falls off the loop (returns None) when none applies.
        while loc:
            if loc.putaway_strategy_id:
                res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
                if res:
                    return res
            loc = loc.location_id

    def _default_removal_strategy(self, cr, uid, context=None):
        # Strategy used when neither the product category nor any ancestor
        # location configures one.
        return 'fifo'

    def get_removal_strategy(self, cr, uid, location, product, context=None):
        ''' Returns the removal strategy to consider for the given product and location.
            :param location: browse record (stock.location)
            :param product: browse record (product.product)
            :rtype: char
        '''
        # Product-category strategy has precedence over location strategies.
        if product.categ_id.removal_strategy_id:
            return product.categ_id.removal_strategy_id.method
        loc = location
        # Otherwise take the first strategy found walking up the location tree.
        while loc:
            if loc.removal_strategy_id:
                return loc.removal_strategy_id.method
            loc = loc.location_id
        return self._default_removal_strategy(cr, uid, context=context)

    def get_warehouse(self, cr, uid, location, context=None):
        """ Returns warehouse id of warehouse that contains location
            :param location: browse record (stock.location)
        """
        wh_obj = self.pool.get("stock.warehouse")
        # Nested-set containment test: the warehouse's view location must
        # enclose the given location's parent_left.
        whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
                                      ('view_location_id.parent_right', '>=', location.parent_left)], context=context)
        return whs and whs[0] or False


#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
    _name = 'stock.location.route'
    _description = "Inventory Routes"
    _order = 'sequence'

    _columns = {
        'name': fields.char('Route Name', required=True, translate=True),
        'sequence': fields.integer('Sequence'),
        # Procurement (pull) rules attached to this route; duplicated on copy.
        'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
        # Push rules attached to this route; duplicated on copy.
        'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
        # Flags restricting where the route can be selected in the UI.
        'product_selectable': fields.boolean('Applicable on Product'),
        'product_categ_selectable': fields.boolean('Applicable on Product Category'),
        'warehouse_selectable': fields.boolean('Applicable on Warehouse'),
        'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
        'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this route is shared between all companies'),
    }

    _defaults = {
        'sequence': lambda self, cr, uid, ctx: 0,
        'active': True,
        'product_selectable': True,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
    }

    def write(self, cr, uid, ids, vals, context=None):
        '''when a route is deactivated, deactivate also its pull and push rules'''
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
        # Propagate (de)activation to the route's rules, touching only the
        # rules whose active flag actually differs from the new value.
        if 'active' in vals:
            push_ids = []
            pull_ids = []
            for route in self.browse(cr, uid, ids, context=context):
                if route.push_ids:
                    push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
                if route.pull_ids:
                    pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
            if push_ids:
                self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
            if pull_ids:
                self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
        return res


#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
    """ Quants are the smallest unit of stock physical instances """
    _name = "stock.quant"
    _description = "Quants"

    def _get_quant_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        res = {}
        for q in self.browse(cr, uid, ids, context=context):
            # Label is "<code or lot>: <qty><uom>"; the lot name, when set,
            # replaces (not appends to) the product code.
            res[q.id] = q.product_id.code or ''
            if q.lot_id:
                res[q.id] = q.lot_id.name
            res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
        return res

    def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
        # Compute the inventory value of each quant, taking care to read
        # company-dependent (property) fields in the quant's own company.
        context = dict(context or {})
        res = {}
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for quant in self.browse(cr, uid, ids, context=context):
            context.pop('force_company', None)
            if quant.company_id.id != uid_company_id:
                #if the company of the quant is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = quant.company_id.id
                quant = self.browse(cr, uid, quant.id, context=context)
            res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
        return res

    def _get_inventory_value(self, cr, uid, quant, context=None):
        # Valuation hook: cost price of the product times the quant quantity.
        # Overridable (e.g. by costing methods in other modules).
        return quant.product_id.standard_price * quant.qty

    _columns = {
        'name': fields.function(_get_quant_name, type='char', string='Identifier'),
        'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
        # Quantity expressed in the product's default unit of measure; may be
        # negative for "negative quants" awaiting reconciliation.
        'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
        'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
        'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
        'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
        'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
        'cost': fields.float('Unit Cost'),
        'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
        # Full move history of the quant; used for traceability and merging.
        'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
        'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
        'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),

        # Used for negative quants to reconcile after compensated by a new positive one
        'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
        'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
        'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
                                                    help="Technical field used to record the destination location of a move that created a negative quant"),
    }

    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
    }

    def init(self, cr):
        # Create a covering multi-column index for quant lookups if it does
        # not exist yet (init() runs at every module install/update).
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        ''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
        res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
        if 'inventory_value' in fields:
            # Function fields are not aggregated by SQL: re-browse each group's
            # records and sum the value in Python.
            for line in res:
                if '__domain' in line:
                    lines = self.search(cr, uid, line['__domain'], context=context)
                    inv_value = 0.0
                    for line2 in self.browse(cr, uid, lines, context=context):
                        inv_value += line2.inventory_value
                    line['inventory_value'] = inv_value
        return res

    def action_view_quant_history(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display the history of the quant, which
        mean all the stock moves that lead to this quant creation with this quant quantity.
        '''
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        # Base the result on the generic stock move list action, restricted to
        # the moves recorded in the quants' history.
        result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context={})[0]
        move_ids = []
        for quant in self.browse(cr, uid, ids, context=context):
            move_ids += [move.id for move in quant.history_ids]
        result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
        return result

    def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
        '''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
        is also set to 'assigned'

        :param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored.
                       Negative quants should not be received as argument
        :param move: browse record
        :param link: browse record (stock.move.operation.link)
        '''
        toreserve = []
        reserved_availability = move.reserved_availability
        #split quants if needed
        for quant, qty in quants:
            if qty <= 0.0 or (quant and quant.qty <= 0.0):
                raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
            if not quant:
                continue
            # Split so that exactly `qty` remains on this quant; the surplus
            # goes to a new quant (see _quant_split) and stays unreserved.
            self._quant_split(cr, uid, quant, qty, context=context)
            toreserve.append(quant.id)
            reserved_availability += quant.qty
        #reserve quants
        if toreserve:
            self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
            #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
            if move.picking_id:
                self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
        #check if move'state needs to be set as 'assigned'
        rounding = move.product_id.uom_id.rounding
        if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting'):
            self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
        elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
            self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)

    def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None):
        """Moves all given stock.quant in the given destination location.  Unreserve from current move.

        :param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
        :param move: browse record (stock.move)
        :param location_to: browse record (stock.location) depicting where the quants have to be moved
        :param location_from: optional browse record (stock.location) explaining where the quant has to be taken
                              (may differ from the move source location in case a removal strategy applied).
                              This parameter is only used to pass to _quant_create if a negative quant must be created
        :param lot_id: ID of the lot that must be set on the quants to move
        :param owner_id: ID of the partner that must own the quants to move
        :param src_package_id: ID of the package that contains the quants to move
        :param dest_package_id: ID of the package that must be set on the moved quant
        """
        quants_reconcile = []
        to_move_quants = []
        self._check_location(cr, uid, location_to, context=context)
        for quant, qty in quants:
            if not quant:
                #If quant is None, we will create a quant to move (and potentially a negative counterpart too)
                quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
            else:
                self._quant_split(cr, uid, quant, qty, context=context)
                to_move_quants.append(quant)
            quants_reconcile.append(quant)
        if to_move_quants:
            # Moves that had these quants reserved (other than the current one)
            # must have their state recomputed after the quants leave.
            to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
            self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context)
            self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
        if location_to.usage == 'internal':
            # Do manual search for quant to avoid full table scan (order by id)
            # Only reconcile when a negative quant for this product exists under
            # the destination location subtree (nested-set range check).
            cr.execute("""
                SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
                ((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
            """, (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
            if cr.fetchone():
                for quant in quants_reconcile:
                    self._quant_reconcile_negative(cr, uid, quant, move, context=context)

    def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
        # Relocate the quants, append the move to their history and clear any
        # reservation. The package is kept when moving an entire pack.
        context = context or {}
        vals = {'location_id': location_dest_id.id,
                'history_ids': [(4, move.id)],
                'reservation_id': False}
        if not context.get('entire_pack'):
            vals.update({'package_id': dest_package_id})
        self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)

    def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None):
        ''' This function tries to find quants in the given location for the given domain, by trying to first limit
            the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached
            it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on).
            Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal
        '''
        # NOTE(review): prefered_domain_list=[] is a mutable default argument;
        # safe only as long as callers never mutate it — confirm before changing.
        if domain is None:
            domain = []
        quants = [(None, qty)]
        #don't look for quants in location that are of type production, supplier or inventory.
        if location.usage in ['inventory', 'production', 'supplier']:
            return quants
        res_qty = qty
        if not prefered_domain_list:
            return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
        for prefered_domain in prefered_domain_list:
            res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
            if res_qty_cmp > 0:
                #try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order
                quants.pop()
                tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
                for quant in tmp_quants:
                    if quant[0]:
                        res_qty -= quant[1]
                quants += tmp_quants
        return quants

    def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """
        Use the removal strategies of product to search for the correct quants
        If you inherit, put the super at the end of your method.

        :location: browse record of the parent location where the quants have to be found
        :product: browse record of the product to find
        :qty in UoM of product
        """
        result = []
        domain = domain or [('qty', '>', 0.0)]
        if restrict_partner_id:
            domain += [('owner_id', '=', restrict_partner_id)]
        if restrict_lot_id:
            domain += [('lot_id', '=', restrict_lot_id)]
        if location:
            removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context)
            result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
        return result

    def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None):
        # Map the strategy name to a search order; extend by overriding.
        if removal_strategy == 'fifo':
            order = 'in_date, id'
            return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
        elif removal_strategy == 'lifo':
            order = 'in_date desc, id desc'
            return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
        # NOTE(review): the %-interpolation happens inside _() before the
        # translation lookup, so the interpolated message will not be found in
        # the translation database — confirm before relying on translations here.
        raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.' % (removal_strategy,)))

    def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
                      force_location_from=False, force_location_to=False, context=None):
        '''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
        '''
        if context is None:
            context = {}
        price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
        location = force_location_to or move.location_dest_id
        rounding = move.product_id.uom_id.rounding
        vals = {
            'product_id': move.product_id.id,
            'location_id': location.id,
            'qty': float_round(qty, precision_rounding=rounding),
            'cost': price_unit,
            'history_ids': [(4, move.id)],
            'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'company_id': move.company_id.id,
            'lot_id': lot_id,
            'owner_id': owner_id,
            'package_id': dest_package_id,
        }
        if move.location_id.usage == 'internal':
            #if we were trying to move something from an internal location and reach here (quant creation),
            #it means that a negative quant has to be created as well.
            negative_vals = vals.copy()
            negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
            negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
            negative_vals['cost'] = price_unit
            negative_vals['negative_move_id'] = move.id
            negative_vals['package_id'] = src_package_id
            negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
            vals.update({'propagated_from_id': negative_quant_id})

        #create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
        quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
        return self.browse(cr, uid, quant_id, context=context)

    def _quant_split(self, cr, uid, quant, qty, context=None):
        # Split `quant` so it keeps `qty` and a new quant carries the rest.
        # Returns the new quant, or False when no split was needed.
        context = context or {}
        rounding = quant.product_id.uom_id.rounding
        if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
            return False
        qty_round = float_round(qty, precision_rounding=rounding)
        new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
        # Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
        cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
        res = cr.fetchall()
        new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
        self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
        return self.browse(cr, uid, new_quant, context=context)

    def _get_latest_move(self, cr, uid, quant, context=None):
        # Latest move in the quant's history by date (string comparison of
        # server-format datetimes preserves chronological order).
        move = False
        for m in quant.history_ids:
            if not move or m.date > move.date:
                move = m
        return move

    @api.cr_uid_ids_context
    def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
        # Append the solving quant's move history onto the solved quants.
        path = []
        for move in solving_quant.history_ids:
            path.append((4, move.id))
        self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)

    def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
        """
            When new quant arrive in a location, try to reconcile it with
            negative quants. If it's possible, apply the cost of the new
            quant to the conter-part of the negative quant.
        """
        solving_quant = quant
        # Candidate negative quants must match lot/owner/package and must not
        # be the one this quant itself propagated from.
        dom = [('qty', '<', 0)]
        if quant.lot_id:
            dom += [('lot_id', '=', quant.lot_id.id)]
        dom += [('owner_id', '=', quant.owner_id.id)]
        dom += [('package_id', '=', quant.package_id.id)]
        dom += [('id', '!=', quant.propagated_from_id.id)]
        quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context)
        product_uom_rounding = quant.product_id.uom_id.rounding
        for quant_neg, qty in quants:
            if not quant_neg or not solving_quant:
                continue
            to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
            if not to_solve_quant_ids:
                continue
            solving_qty = qty
            solved_quant_ids = []
            # Split the quants propagated from the negative one until the
            # solving quantity is consumed.
            for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
                if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
                    continue
                solved_quant_ids.append(to_solve_quant.id)
                self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
                solving_qty -= min(solving_qty, to_solve_quant.qty)
            remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
            remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
            #if the reconciliation was not complete, we need to link together the remaining parts
            if remaining_neg_quant:
                remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
                if remaining_to_solve_quant_ids:
                    self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
            if solving_quant.propagated_from_id and solved_quant_ids:
                self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
            #delete the reconciled quants, as it is replaced by the solved quants
            self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
            if solved_quant_ids:
                #price update + accounting entries adjustments
                self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
                #merge history (and cost?)
                self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
            self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
            # Continue reconciling with whatever is left of the solving quant
            # (False when it was fully consumed, which stops further work).
            solving_quant = remaining_solving_quant

    def _price_update(self, cr, uid, ids, newprice, context=None):
        # Hook for valuation modules; base implementation only rewrites cost.
        self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)

    def quants_unreserve(self, cr, uid, move, context=None):
        # Release every quant reserved for the given move and reset the
        # related picking/move flags accordingly.
        related_quants = [x.id for x in move.reserved_quant_ids]
        if related_quants:
            #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
            if move.picking_id:
                self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
            if move.partially_available:
                self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
            self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)

    def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None):
        ''' Implementation of removal strategies
            If it can not reserve, it will return a tuple (None, qty)
        '''
        # NOTE(review): domain=[] is a mutable default argument; the += below
        # rebinds rather than mutates it here, but confirm before refactoring.
        if context is None:
            context = {}
        domain += location and [('location_id', 'child_of', location.id)] or []
        domain += [('product_id', '=', product.id)]
        if context.get('force_company'):
            domain += [('company_id', '=', context.get('force_company'))]
        else:
            domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)]
        res = []
        offset = 0
        # Page through matching quants (10 at a time) until the requested
        # quantity is satisfied or no quants remain (then (None, rest)).
        while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
            quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
            if not quants:
                res.append((None, quantity))
                break
            for quant in self.browse(cr, uid, quants, context=context):
                rounding = product.uom_id.rounding
                if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
                    res += [(quant, abs(quant.qty))]
                    quantity -= abs(quant.qty)
                elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
                    res += [(quant, quantity)]
                    quantity = 0
                    break
            offset += 10
        return res

    def _check_location(self, cr, uid, location, context=None):
        # View locations are structural only; stock may never be moved there.
        if location.usage == 'view':
            raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name))
        return True


#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
    _name = "stock.picking"
    _inherit = ['mail.thread']
    _description = "Picking List"
    _order = "priority desc, date asc, id desc"

    def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
        # Inverse of the min_date function field: push the scheduled date down
        # to every move of the picking.
        move_obj = self.pool.get("stock.move")
        if value:
            move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
            move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)

    def _set_priority(self, cr, uid, id, field, value, arg, context=None):
        # Inverse of the priority function field: propagate to all moves.
        move_obj = self.pool.get("stock.move")
        if value:
            move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
            move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)

    def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds minimum and maximum dates for picking.
        @return: Dictionary of values
        """
        res = {}
        for id in ids:
            res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
        if not ids:
            return res
        # One aggregate query over all moves of the requested pickings.
        cr.execute("""select picking_id, min(date_expected), max(date_expected), max(priority) from stock_move where picking_id IN %s group by picking_id""", (tuple(ids),))
        for pick, dt1, dt2, prio in cr.fetchall():
            res[pick]['min_date'] = dt1
            res[pick]['max_date'] = dt2
            res[pick]['priority'] = prio
        return res

    def create(self, cr, user, vals, context=None):
        # Assign the reference from the picking type's sequence when no
        # explicit name (or the '/' placeholder) is given.
        context = context or {}
        if ('name' not in vals) or (vals.get('name') in ('/', False)):
            ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
            sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
            vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
        return super(stock_picking, self).create(cr, user, vals, context)

    def _state_get(self, cr, uid, ids, field_name, arg, context=None):
        '''The state of a picking depends on the state of its related stock.move
            draft: the picking has no line or any one of the lines is draft
            done, draft, cancel: all lines are done / draft / cancel
            confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
        '''
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
                res[pick.id] = 'draft'
                continue
            if all([x.state == 'cancel' for x in pick.move_lines]):
                res[pick.id] = 'cancel'
                continue
            if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
                res[pick.id] = 'done'
                continue
            # Rank the remaining move states; 'one' pickings take the least
            # advanced state, partial pickings the most advanced.
            order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
            order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
            lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
            if pick.move_type == 'one':
                res[pick.id] = order_inv[min(lst)]
            else:
                #we are in the case of partial delivery, so if all move are assigned, picking
                #should be assign too, else if one of the move is assigned, or partially available, picking should be
                #in partially available state, otherwise, picking is in waiting or confirmed state
                res[pick.id] = order_inv[max(lst)]
                if not all(x == 2 for x in lst):
                    if any(x == 2 for x in lst):
                        res[pick.id] = 'partially_available'
                    else:
                        #if all moves aren't assigned, check if we have one product partially available
                        for move in pick.move_lines:
                            if move.partially_available:
                                res[pick.id] = 'partially_available'
                                break
        return res

    def _get_pickings(self, cr, uid, ids, context=None):
        # store= trigger on stock.move: returns the pickings of the moves.
        res = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.picking_id:
                res.add(move.picking_id.id)
        return list(res)

    def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
        # store= trigger: pickings whose min/max dates or priority could be
        # affected by the changed moves.
        res = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
                res.add(move.picking_id.id)
        return list(res)

    def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
        # True when the picking has at least one pack operation (view helper).
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            res[pick.id] = False
            if pick.pack_operation_ids:
                res[pick.id] = True
        return res

    def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
        # True when any move of the picking already has reserved quants.
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            res[pick.id] = False
            for move in pick.move_lines:
                if move.reserved_quant_ids:
                    res[pick.id] = True
                    continue
        return res

    def check_group_lot(self, cr, uid, context=None):
        """ This function will return true if we have the setting to use lots activated. """
        return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot')

    def check_group_pack(self, cr, uid, context=None):
        """ This function will return true if we have the setting to use package activated. """
        return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')

    def action_assign_owner(self, cr, uid, ids, context=None):
        # Push the picking's default owner onto all of its pack operations.
        for picking in self.browse(cr, uid, ids, context=context):
            packop_ids = [op.id for op in picking.pack_operation_ids]
            self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)

    _columns = {
        'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
        'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
        'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
        'note': fields.text('Notes'),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be deliver partially or all at once"),
        # Computed from the moves' states; recomputed when moves change state,
        # picking, or partial availability, or when the move_type changes.
        'state': fields.function(_state_get, type="selection", copy=False,
            store={
                'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20),
                'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
            selection=[
                ('draft', 'Draft'),
                ('cancel', 'Cancelled'),
                ('waiting', 'Waiting Another Operation'),
                ('confirmed', 'Waiting Availability'),
                ('partially_available', 'Partially Available'),
                ('assigned', 'Ready to Transfer'),
                ('done', 'Transferred'),
                ], string='Status', readonly=True, select=True, track_visibility='onchange', help=""" * Draft: not confirmed yet and will not be scheduled until confirmed\n * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n * Waiting Availability: still waiting for the availability of products\n * Partially Available: some products are available and reserved\n * Ready to Transfer: products reserved, simply waiting for confirmation.\n * Transferred: has been processed, can't be modified or cancelled anymore\n * Cancelled: has been cancelled, can't be confirmed anymore"""
        ),
        # Aggregated over the moves; writing through fnct_inv propagates the
        # value back down to every move.
        'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
                                    store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
                                    track_visibility='onchange', required=True),
        'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
                 store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
        'max_date': fields.function(get_min_max_date, multi="min_max_date",
                 store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
        'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
        'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
        'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True),
        'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'),
        'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
        'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
        'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
        'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"),
        'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
        # Used
to search on pickings 'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'), 'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False), 'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True), 'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True), 'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10), 'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10), }), } _defaults = { 'name': '/', 'state': 'draft', 'move_type': 'direct', 'priority': '1', # normal 'date': fields.datetime.now, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c), 'recompute_pack_op': True, } _sql_constraints = [ ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'), ] def do_print_picking(self, cr, uid, ids, context=None): '''This function prints the picking list''' context = dict(context or {}, active_ids=ids) return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context) def action_confirm(self, cr, uid, ids, context=None): todo = [] todo_force_assign = [] for picking in self.browse(cr, uid, ids, context=context): if picking.location_id.usage in ('supplier', 'inventory', 'production'): todo_force_assign.append(picking.id) for r in picking.move_lines: if r.state == 'draft': todo.append(r.id) if len(todo): self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context) if todo_force_assign: 
self.force_assign(cr, uid, todo_force_assign, context=context) return True def action_assign(self, cr, uid, ids, context=None): """ Check availability of picking moves. This has the effect of changing the state and reserve quants on available moves, and may also impact the state of the picking as it is computed based on move's states. @return: True """ for pick in self.browse(cr, uid, ids, context=context): if pick.state == 'draft': self.action_confirm(cr, uid, [pick.id], context=context) #skip the moves that don't need to be checked move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')] if not move_ids: raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.')) self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context) return True def force_assign(self, cr, uid, ids, context=None): """ Changes state of picking to available if moves are confirmed or waiting. @return: True """ for pick in self.browse(cr, uid, ids, context=context): move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']] self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context) #pack_operation might have changed and need to be recomputed self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context) return True def action_cancel(self, cr, uid, ids, context=None): for pick in self.browse(cr, uid, ids, context=context): ids2 = [move.id for move in pick.move_lines] self.pool.get('stock.move').action_cancel(cr, uid, ids2, context) return True def action_done(self, cr, uid, ids, context=None): """Changes picking state to done by processing the Stock Moves of the Picking Normally that happens when the button "Done" is pressed on a Picking view. 
@return: True """ for pick in self.browse(cr, uid, ids, context=context): todo = [] for move in pick.move_lines: if move.state == 'draft': todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context)) elif move.state in ('assigned', 'confirmed'): todo.append(move.id) if len(todo): self.pool.get('stock.move').action_done(cr, uid, todo, context=context) return True def unlink(self, cr, uid, ids, context=None): #on picking deletion, cancel its move then unlink them too move_obj = self.pool.get('stock.move') context = context or {} for pick in self.browse(cr, uid, ids, context=context): move_ids = [move.id for move in pick.move_lines] move_obj.action_cancel(cr, uid, move_ids, context=context) move_obj.unlink(cr, uid, move_ids, context=context) return super(stock_picking, self).unlink(cr, uid, ids, context=context) def write(self, cr, uid, ids, vals, context=None): if vals.get('move_lines') and not vals.get('pack_operation_ids'): # pack operations are directly dependant of move lines, it needs to be recomputed pack_operation_obj = self.pool['stock.pack.operation'] existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) res = super(stock_picking, self).write(cr, uid, ids, vals, context=context) #if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both if 'move_lines' in vals or 'pack_operation_ids' in vals: self.do_recompute_remaining_quantities(cr, uid, ids, context=context) return res def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None): """ Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines. 
""" if not backorder_moves: backorder_moves = picking.move_lines backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')] if 'do_only_split' in context and context['do_only_split']: backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])] if backorder_move_ids: backorder_id = self.copy(cr, uid, picking.id, { 'name': '/', 'move_lines': [], 'pack_operation_ids': [], 'backorder_id': picking.id, }) backorder = self.browse(cr, uid, backorder_id, context=context) self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context) move_obj = self.pool.get("stock.move") move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context) if not picking.date_done: self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.action_confirm(cr, uid, [backorder_id], context=context) return backorder_id return False @api.cr_uid_ids_context def recheck_availability(self, cr, uid, picking_ids, context=None): self.action_assign(cr, uid, picking_ids, context=context) self.do_prepare_partial(cr, uid, picking_ids, context=context) def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None): """This method searches for the higher level packages that can be moved as a single operation, given a list of quants to move and their suggested destination, and returns the list of matching packages. 
""" # Try to find as much as possible top-level packages that can be moved pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") top_lvl_packages = set() quants_to_compare = quants_suggested_locations.keys() for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])): loop = True test_pack = pack good_pack = False pack_destination = False while loop: pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context) all_in = True for quant in quant_obj.browse(cr, uid, pack_quants, context=context): # If the quant is not in the quants to compare and not in the common location if not quant in quants_to_compare: all_in = False break else: #if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation) if not pack_destination: pack_destination = quants_suggested_locations[quant] elif pack_destination != quants_suggested_locations[quant]: all_in = False break if all_in: good_pack = test_pack if test_pack.parent_id: test_pack = test_pack.parent_id else: #stop the loop when there's no parent package anymore loop = False else: #stop the loop when the package test_pack is not totally reserved for moves of this picking #(some quants may be reserved for other picking or not reserved at all) loop = False if good_pack: top_lvl_packages.add(good_pack) return list(top_lvl_packages) def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None): """ returns a list of dict, ready to be used in create() of stock.pack.operation. :param picking: browse record (stock.picking) :param quants: browse record list (stock.quant). 
List of quants associated to the picking :param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking """ def _picking_putaway_apply(product): location = False # Search putaway strategy if product_putaway_strats.get(product.id): location = product_putaway_strats[product.id] else: location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context) product_putaway_strats[product.id] = location return location or picking.location_dest_id.id # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead. product_uom = {} # Determines UoM used in pack operations location_dest_id = None location_id = None for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if not product_uom.get(move.product_id.id): product_uom[move.product_id.id] = move.product_id.uom_id if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor: product_uom[move.product_id.id] = move.product_uom if not move.scrapped: if location_dest_id and move.location_dest_id.id != location_dest_id: raise Warning(_('The destination location must be the same for all the moves of the picking.')) location_dest_id = move.location_dest_id.id if location_id and move.location_id.id != location_id: raise Warning(_('The source location must be the same for all the moves of the picking.')) location_id = move.location_id.id pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") vals = [] qtys_grouped = {} #for each quant of the picking, find the suggested location quants_suggested_locations = {} product_putaway_strats = {} for quant in quants: if quant.qty <= 0: continue suggested_location_id = _picking_putaway_apply(quant.product_id) quants_suggested_locations[quant] = suggested_location_id #find the 
packages we can movei as a whole top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context) # and then create pack operations for the top-level packages found for pack in top_lvl_packages: pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context) pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context) vals.append({ 'picking_id': picking.id, 'package_id': pack.id, 'product_qty': 1.0, 'location_id': pack.location_id.id, 'location_dest_id': quants_suggested_locations[pack_quants[0]], 'owner_id': pack.owner_id.id, }) #remove the quants inside the package so that they are excluded from the rest of the computation for quant in pack_quants: del quants_suggested_locations[quant] # Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location for quant, dest_location_id in quants_suggested_locations.items(): key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id) if qtys_grouped.get(key): qtys_grouped[key] += quant.qty else: qtys_grouped[key] = quant.qty # Do the same for the forced quantities (in cases of force_assign or incomming shipment for example) for product, qty in forced_qties.items(): if qty <= 0: continue suggested_location_id = _picking_putaway_apply(product) key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id) if qtys_grouped.get(key): qtys_grouped[key] += qty else: qtys_grouped[key] = qty # Create the necessary operations for the grouped quants and remaining qtys uom_obj = self.pool.get('product.uom') prevals = {} for key, qty in qtys_grouped.items(): product = self.pool.get("product.product").browse(cr, uid, key[0], context=context) uom_id = product.uom_id.id qty_uom = qty if product_uom.get(key[0]): uom_id = product_uom[key[0]].id qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id) val_dict = 
{ 'picking_id': picking.id, 'product_qty': qty_uom, 'product_id': key[0], 'package_id': key[1], 'lot_id': key[2], 'owner_id': key[3], 'location_id': key[4], 'location_dest_id': key[5], 'product_uom_id': uom_id, } if key[0] in prevals: prevals[key[0]].append(val_dict) else: prevals[key[0]] = [val_dict] # prevals var holds the operations in order to create them in the same order than the picking stock moves if possible processed_products = set() for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if move.product_id.id not in processed_products: vals += prevals.get(move.product_id.id, []) processed_products.add(move.product_id.id) return vals @api.cr_uid_ids_context def open_barcode_interface(self, cr, uid, picking_ids, context=None): final_url="/barcode/web/#action=stock.ui&picking_id="+str(picking_ids[0]) return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',} @api.cr_uid_ids_context def do_partial_open_barcode(self, cr, uid, picking_ids, context=None): self.do_prepare_partial(cr, uid, picking_ids, context=context) return self.open_barcode_interface(cr, uid, picking_ids, context=context) @api.cr_uid_ids_context def do_prepare_partial(self, cr, uid, picking_ids, context=None): context = context or {} pack_operation_obj = self.pool.get('stock.pack.operation') #used to avoid recomputing the remaining quantities at each new pack operation created ctx = context.copy() ctx['no_recompute'] = True #get list of existing operations and delete them existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) for picking in self.browse(cr, uid, picking_ids, context=context): forced_qties = {} # Quantity remaining after calculating reserved quants picking_quants = [] #Calculate packages, reserved quants, qtys of this picking's moves for move in picking.move_lines: if move.state not in 
('assigned', 'confirmed', 'waiting'): continue move_quants = move.reserved_quant_ids picking_quants += move_quants forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0 #if we used force_assign() on the move, or if the move is incoming, forced_qty > 0 if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0: if forced_qties.get(move.product_id): forced_qties[move.product_id] += forced_qty else: forced_qties[move.product_id] = forced_qty for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context): pack_operation_obj.create(cr, uid, vals, context=ctx) #recompute the remaining quantities all at once self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context) self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context) @api.cr_uid_ids_context def do_unreserve(self, cr, uid, picking_ids, context=None): """ Will remove all quants for picking in picking_ids """ moves_to_unreserve = [] pack_line_to_unreserve = [] for picking in self.browse(cr, uid, picking_ids, context=context): moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')] pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids] if moves_to_unreserve: if pack_line_to_unreserve: self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context) self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context) def recompute_remaining_qty(self, cr, uid, picking, context=None): def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False): move_dict = prod2move_ids[product_id][index] qty_on_link = min(move_dict['remaining_qty'], qty_to_assign) self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context) if 
move_dict['remaining_qty'] == qty_on_link: prod2move_ids[product_id].pop(index) else: move_dict['remaining_qty'] -= qty_on_link return qty_on_link def _create_link_for_quant(operation_id, quant, qty): """create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity""" if not quant.reservation_id.id: return _create_link_for_product(operation_id, quant.product_id.id, qty) qty_on_link = 0 for i in range(0, len(prod2move_ids[quant.product_id.id])): if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id: continue qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id) break return qty_on_link def _create_link_for_product(operation_id, product_id, qty): '''method that creates the link between a given operation and move(s) of given product, for the given quantity. Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)''' qty_to_assign = qty prod_obj = self.pool.get("product.product") product = prod_obj.browse(cr, uid, product_id) rounding = product.uom_id.rounding qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) if prod2move_ids.get(product_id): while prod2move_ids[product_id] and qtyassign_cmp > 0: qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False) qty_to_assign -= qty_on_link qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) return qtyassign_cmp == 0 uom_obj = self.pool.get('product.uom') package_obj = self.pool.get('stock.quant.package') quant_obj = self.pool.get('stock.quant') link_obj = self.pool.get('stock.move.operation.link') quants_in_package_done = set() prod2move_ids = {} still_to_do = [] #make a dictionary giving for each product, the moves and related quantity that can be used in operation links for move in [x for x in picking.move_lines if x.state not 
in ('done', 'cancel')]: if not prod2move_ids.get(move.product_id.id): prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}] else: prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty}) need_rereserve = False #sort the operations in order to give higher priority to those with a package, then a serial number operations = picking.pack_operation_ids operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) #delete existing operations to start again from scratch links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context) if links: link_obj.unlink(cr, uid, links, context=context) #1) first, try to create links when quants can be identified without any doubt for ops in operations: #for each operation, create the links with the stock move by seeking on the matching reserved quants, #and deffer the operation if there is some ambiguity on the move to select if ops.package_id and not ops.product_id: #entire package quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context) for quant in quant_obj.browse(cr, uid, quant_ids, context=context): remaining_qty_on_quant = quant.qty if quant.reservation_id: #avoid quants being counted twice quants_in_package_done.add(quant.id) qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty) remaining_qty_on_quant -= qty_on_link if remaining_qty_on_quant: still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant)) need_rereserve = True elif ops.product_id.id: #Check moves with same product qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context) for move_dict in prod2move_ids.get(ops.product_id.id, []): move = move_dict['move'] for quant in move.reserved_quant_ids: if not qty_to_assign > 0: break if quant.id in 
quants_in_package_done: continue #check if the quant is matching the operation details if ops.package_id: flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False else: flag = not quant.package_id.id flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id) flag = flag and (ops.owner_id.id == quant.owner_id.id) if flag: max_qty_on_link = min(quant.qty, qty_to_assign) qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link) qty_to_assign -= qty_on_link qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding) if qty_assign_cmp > 0: #qty reserved is less than qty put in operations. We need to create a link but it's deferred after we processed #all the quants (because they leave no choice on their related move and needs to be processed with higher priority) still_to_do += [(ops, ops.product_id.id, qty_to_assign)] need_rereserve = True #2) then, process the remaining part all_op_processed = True for ops, product_id, remaining_qty in still_to_do: all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed return (need_rereserve, all_op_processed) def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None): need_rereserve = False all_op_processed = True if picking.pack_operation_ids: need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context) return need_rereserve, all_op_processed @api.cr_uid_ids_context def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None): for picking in self.browse(cr, uid, picking_ids, context=context): if picking.pack_operation_ids: self.recompute_remaining_qty(cr, uid, picking, context=context) def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None): """ Creates an extra move when there is no corresponding original move to be copied """ 
uom_obj = self.pool.get("product.uom") uom_id = product.uom_id.id qty = remaining_qty if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id: if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's is a smaller unit uom_id = op.product_uom_id.id #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP') picking = op.picking_id ref = product.default_code name = '[' + ref + ']' + ' ' + product.name if ref else product.name res = { 'picking_id': picking.id, 'location_id': picking.location_id.id, 'location_dest_id': picking.location_dest_id.id, 'product_id': product.id, 'product_uom': uom_id, 'product_uom_qty': qty, 'name': _('Extra Move: ') + name, 'state': 'draft', 'restrict_partner_id': op.owner_id, 'group_id': picking.group_id.id, } return res def _create_extra_moves(self, cr, uid, picking, context=None): '''This function creates move lines on a picking, at the time of do_transfer, based on unexpected product transfers (or exceeding quantities) found in the pack operations. 
''' move_obj = self.pool.get('stock.move') operation_obj = self.pool.get('stock.pack.operation') moves = [] for op in picking.pack_operation_ids: for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items(): product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0: vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context) moves.append(move_obj.create(cr, uid, vals, context=context)) if moves: move_obj.action_confirm(cr, uid, moves, context=context) return moves def rereserve_pick(self, cr, uid, ids, context=None): """ This can be used to provide a button that rereserves taking into account the existing pack operations """ for pick in self.browse(cr, uid, ids, context=context): self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context) def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None): """ Unreserve quants then try to reassign quants.""" stock_move_obj = self.pool.get('stock.move') if not move_ids: self.do_unreserve(cr, uid, [picking.id], context=context) self.action_assign(cr, uid, [picking.id], context=context) else: stock_move_obj.do_unreserve(cr, uid, move_ids, context=context) stock_move_obj.action_assign(cr, uid, move_ids, context=context) @api.cr_uid_ids_context def do_enter_transfer_details(self, cr, uid, picking, context=None): if not context: context = {} context.update({ 'active_model': self._name, 'active_ids': picking, 'active_id': len(picking) and picking[0] or False }) created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context) return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context) @api.cr_uid_ids_context def do_transfer(self, cr, uid, picking_ids, context=None): """ If no pack operation, we do 
simple action_done of the picking Otherwise, do the pack operations """ if not context: context = {} stock_move_obj = self.pool.get('stock.move') for picking in self.browse(cr, uid, picking_ids, context=context): if not picking.pack_operation_ids: self.action_done(cr, uid, [picking.id], context=context) continue else: need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context) #create extra moves in the picking (unexpected product moves coming from pack operations) todo_move_ids = [] if not all_op_processed: todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context) #split move lines if needed toassign_move_ids = [] for move in picking.move_lines: remaining_qty = move.remaining_qty if move.state in ('done', 'cancel'): #ignore stock moves cancelled or already done continue elif move.state == 'draft': toassign_move_ids.append(move.id) if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0: if move.state in ('draft', 'assigned', 'confirmed'): todo_move_ids.append(move.id) elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \ float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0: new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context) todo_move_ids.append(move.id) #Assign move as it was assigned before toassign_move_ids.append(new_move) if need_rereserve or not all_op_processed: if not picking.location_id.usage in ("supplier", "production", "inventory"): self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context) self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context) if todo_move_ids and not context.get('do_only_split'): self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context) elif context.get('do_only_split'): context = dict(context, split=todo_move_ids) 
self._create_backorder(cr, uid, picking, context=context) if toassign_move_ids: stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context) return True @api.cr_uid_ids_context def do_split(self, cr, uid, picking_ids, context=None): """ just split the picking (create a backorder) without making it 'done' """ if context is None: context = {} ctx = context.copy() ctx['do_only_split'] = True return self.do_transfer(cr, uid, picking_ids, context=ctx) def get_next_picking_for_ui(self, cr, uid, context=None): """ returns the next pickings to process. Used in the barcode scanner UI""" if context is None: context = {} domain = [('state', 'in', ('assigned', 'partially_available'))] if context.get('default_picking_type_id'): domain.append(('picking_type_id', '=', context['default_picking_type_id'])) return self.search(cr, uid, domain, context=context) def action_done_from_ui(self, cr, uid, picking_id, context=None): """ called when button 'done' is pushed in the barcode scanner UI """ #write qty_done into field product_qty for every package_operation before doing the transfer pack_op_obj = self.pool.get('stock.pack.operation') for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids: pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context) self.do_transfer(cr, uid, [picking_id], context=context) #return id of next picking to work on return self.get_next_picking_for_ui(cr, uid, context=context) @api.cr_uid_ids_context def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None): """ Create a package with the current pack_operation_ids of the picking that aren't yet in a pack. Used in the barcode scanner UI and the normal interface as well. 
operation_filter_ids is used by barcode scanner interface to specify a subset of operation to pack""" if operation_filter_ids == None: operation_filter_ids = [] stock_operation_obj = self.pool.get('stock.pack.operation') package_obj = self.pool.get('stock.quant.package') stock_move_obj = self.pool.get('stock.move') package_id = False for picking_id in picking_ids: operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)] if operation_filter_ids != []: operation_search_domain.append(('id', 'in', operation_filter_ids)) operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context) pack_operation_ids = [] if operation_ids: for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context): #If we haven't done all qty in operation, we have to split into 2 operation op = operation if (operation.qty_done < operation.product_qty): new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context) stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0}, context=context) op = stock_operation_obj.browse(cr, uid, new_operation, context=context) pack_operation_ids.append(op.id) if op.product_id and op.location_id and op.location_dest_id: stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context) package_id = package_obj.create(cr, uid, {}, context=context) stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context) return package_id def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None): return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context) def 
process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None): '''This function is called each time there barcode scanner reads an input''' lot_obj = self.pool.get('stock.production.lot') package_obj = self.pool.get('stock.quant.package') product_obj = self.pool.get('product.product') stock_operation_obj = self.pool.get('stock.pack.operation') stock_location_obj = self.pool.get('stock.location') answer = {'filter_loc': False, 'operation_id': False} #check if the barcode correspond to a location matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context) if matching_location_ids: #if we have a location, return immediatly with the location name location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None) answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None) answer['filter_loc_id'] = matching_location_ids[0] return answer #check if the barcode correspond to a product matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context) if matching_product_ids: op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer #check if the barcode correspond to a lot matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context) if matching_lot_ids: lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context) op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer #check if the barcode correspond to a package matching_package_ids = 
package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context) if matching_package_ids: op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer return answer class stock_production_lot(osv.osv): _name = 'stock.production.lot' _inherit = ['mail.thread'] _description = 'Lot/Serial' _columns = { 'name': fields.char('Serial Number', required=True, help="Unique Serial Number"), 'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"), 'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]), 'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True), 'create_date': fields.datetime('Creation Date'), } _defaults = { 'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'), 'product_id': lambda x, y, z, c: c.get('product_id', False), } _sql_constraints = [ ('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique !'), ] def action_traceability(self, cr, uid, ids, context=None): """ It traces the information of lots @param self: The object pointer. 
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        quant_obj = self.pool.get("stock.quant")
        quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
        # collect every move that ever touched a quant of these lots
        moves = set()
        for quant in quant_obj.browse(cr, uid, quants, context=context):
            moves |= {move.id for move in quant.history_ids}
        if moves:
            # act_window on stock.move restricted to the collected move ids
            return {
                'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
                'name': _('Traceability'),
                'view_mode': 'tree,form',
                'view_type': 'form',
                'context': {'tree_view_ref': 'stock.view_move_tree'},
                'res_model': 'stock.move',
                'type': 'ir.actions.act_window',
            }
        return False


# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
    _name = "stock.move"
    _description = "Stock Move"
    _order = 'date_expected desc, id'
    _log_create = False

    def get_price_unit(self, cr, uid, move, context=None):
        """ Returns the unit price to store on the quant """
        # fall back on the product's standard price when the move has none
        return move.price_unit or move.product_id.standard_price

    def name_get(self, cr, uid, ids, context=None):
        # Display name: "[code: ][origin/ ]source > destination"
        res = []
        for line in self.browse(cr, uid, ids, context=context):
            name = line.location_id.name + ' > ' + line.location_dest_id.name
            if line.product_id.code:
                name = line.product_id.code + ': ' + name
            if line.picking_id.origin:
                name = line.picking_id.origin + '/ ' + name
            res.append((line.id, name))
        return res

    def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
        # Functional getter for product_qty: product_uom_qty converted into the
        # product's default UoM.
        uom_obj = self.pool.get('product.uom')
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
        return res

    def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
        # Quantity of the move not yet covered by linked pack operations,
        # expressed in the product's default UoM.
        uom_obj = self.pool.get('product.uom')
        res = {}
        for move in self.browse(cr, uid, ids, context=context):
            qty = move.product_qty
            for record in move.linked_move_operation_ids:
                qty -= record.qty
            # Keeping in product default UoM
            res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
        return res

    def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
        # Lots of the moved quants (done moves) or of the reserved quants otherwise.
        res = dict.fromkeys(ids, False)
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
            else:
                res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
        return res

    def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
        # Unreserved stock of the product under the source location (children
        # included), capped by the move quantity; done moves report their own qty.
        quant_obj = self.pool.get('stock.quant')
        res = dict.fromkeys(ids, False)
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                res[move.id] = move.product_qty
            else:
                sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
                quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
                availability = 0
                for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
                    availability += quant.qty
                res[move.id] = min(move.product_qty, availability)
        return res

    def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
        # Human-readable availability summary shown on the move, e.g.
        # "5.0 Unit(s) (2.0 reserved)". Empty for draft/done/cancel moves or
        # non-internal source locations.
        settings_obj = self.pool.get('stock.config.settings')
        uom_obj = self.pool.get('product.uom')
        res = dict.fromkeys(ids, '')
        precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
                res[move.id] = ''  # 'not applicable' or 'n/a' could work too
                continue
            total_available = min(move.product_qty, move.reserved_availability + move.availability)
            total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context)
            total_available = float_round(total_available, precision_digits=precision)
            info = str(total_available)
            #look in the settings if we need to display the UoM name or not
            config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
            if config_ids:
                stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
                if stock_settings.group_uom:
                    info += ' ' + move.product_uom.name
            if move.reserved_availability:
                if move.reserved_availability != total_available:
                    #some of the available quantity is assigned and some are available but not reserved
                    reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context)
                    reserved_available = float_round(reserved_available, precision_digits=precision)
                    info += _(' (%s reserved)') % str(reserved_available)
                else:
                    #all available quantity is assigned
                    info += _(' (reserved)')
            res[move.id] = info
        return res

    def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
        # Total quantity of quants currently reserved for each move.
        res = dict.fromkeys(ids, 0)
        for move in self.browse(cr, uid, ids, context=context):
            res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
        return res

    def _get_move(self, cr, uid, ids, context=None):
        # store-trigger helper: moves impacted by the given quants (via reservation).
        res = set()
        for quant in self.browse(cr, uid, ids, context=context):
            if quant.reservation_id:
                res.add(quant.reservation_id.id)
        return list(res)

    def _get_move_ids(self, cr, uid, ids, context=None):
        # store-trigger helper: moves belonging to the given pickings.
        res = []
        for picking in self.browse(cr, uid, ids, context=context):
            res += [x.id for x in picking.move_lines]
        return res

    def _get_moves_from_prod(self, cr, uid, ids, context=None):
        # store-trigger helper: moves of the given products.
        if ids:
            return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
        return []

    def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
        """ The meaning of product_qty field changed lately and is now a functional field computing the quantity
            in the default product UoM.
This code has been added to raise an error if a write is made given a value for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to detect errors. """ raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.')) _columns = { 'name': fields.char('Description', required=True, select=True), 'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'), 'create_date': fields.datetime('Creation Date', readonly=True, select=True), 'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}), 'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"), 'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}), 'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={ _name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10), }, string='Quantity', help='Quantity in the default UoM of the product'), 'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'done': [('readonly', True)]}, help="This is the quantity of products from an inventory " "point of view. For moves in the state 'done', this is the " "quantity of products that were actually moved. For other " "moves, this is the quantity of product that is planned to " "be moved. Lowering this quantity does not generate a " "backorder. 
Changing this quantity on assigned moves affects " "the product reservation, and should be done with care." ), 'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}), 'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product UoS'), states={'done': [('readonly', True)]}), 'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}), 'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'), 'product_packaging': fields.many2one('product.packaging', 'Prefered Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."), 'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True, states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."), 'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True, auto_join=True, help="Location where the system will stock the finished products."), 'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"), 'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False), 'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True), 'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}), 'note': fields.text('Notes'), 'state': fields.selection([('draft', 'New'), ('cancel', 
'Cancelled'), ('waiting', 'Waiting Another Move'), ('confirmed', 'Waiting Availability'), ('assigned', 'Available'), ('done', 'Done'), ], 'Status', readonly=True, select=True, copy=False, help= "* New: When the stock move is created and not yet confirmed.\n"\ "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\ "* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\ "* Available: When products are reserved, it is set to \'Available\'.\n"\ "* Done: When the shipment is processed, the state is \'Done\'."), 'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False), 'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute 'company_id': fields.many2one('res.company', 'Company', required=True, select=True), 'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False), 'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True), 'origin': fields.char("Source"), 'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True, help="""By default, the system will take from the stock in the source location and passively wait for availability. 
The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""), # used for colors in tree views: 'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True), 'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False), 'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'), 'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'), 'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0, states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"), 'procurement_id': fields.many2one('procurement.order', 'Procurement'), 'group_id': fields.many2one('procurement.group', 'Procurement Group'), 'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'), 'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'), 'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'), 'inventory_id': fields.many2one('stock.inventory', 'Inventory'), 'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'), 'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False), 
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'), 'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'), 'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'), 'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'), 'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"), 'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"), 'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."), } def _default_location_destination(self, cr, uid, context=None): context = context or {} if context.get('default_picking_type_id', False): pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context) return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False return False def _default_location_source(self, cr, uid, context=None): context = context or {} if context.get('default_picking_type_id', False): pick_type = 
self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context) return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False return False def _default_destination_address(self, cr, uid, context=None): return False def _default_group_id(self, cr, uid, context=None): context = context or {} if context.get('default_picking_id', False): picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context) return picking.group_id.id return False _defaults = { 'location_id': _default_location_source, 'location_dest_id': _default_location_destination, 'partner_id': _default_destination_address, 'state': 'draft', 'priority': '1', 'product_uom_qty': 1.0, 'scrapped': False, 'date': fields.datetime.now, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c), 'date_expected': fields.datetime.now, 'procure_method': 'make_to_stock', 'propagate': True, 'partially_available': False, 'group_id': _default_group_id, } def _check_uom(self, cr, uid, ids, context=None): for move in self.browse(cr, uid, ids, context=context): if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id: return False return True _constraints = [ (_check_uom, 'You try to move a product using a UoM that is not compatible with the UoM of the product moved. 
Please use an UoM in the same UoM category.', ['product_uom']), ] def init(self, cr): cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',)) if not cr.fetchone(): cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)') @api.cr_uid_ids_context def do_unreserve(self, cr, uid, move_ids, context=None): quant_obj = self.pool.get("stock.quant") for move in self.browse(cr, uid, move_ids, context=context): if move.state in ('done', 'cancel'): raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move')) quant_obj.quants_unreserve(cr, uid, move, context=context) if self.find_move_ancestors(cr, uid, move, context=context): self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context) else: self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context) def _prepare_procurement_from_move(self, cr, uid, move, context=None): origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/") group_id = move.group_id and move.group_id.id or False if move.rule_id: if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id: group_id = move.rule_id.group_id.id elif move.rule_id.group_propagation_option == 'none': group_id = False return { 'name': move.rule_id and move.rule_id.name or "/", 'origin': origin, 'company_id': move.company_id and move.company_id.id or False, 'date_planned': move.date, 'product_id': move.product_id.id, 'product_qty': move.product_uom_qty, 'product_uom': move.product_uom.id, 'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty, 'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id, 'location_id': move.location_id.id, 'move_dest_id': move.id, 'group_id': group_id, 'route_ids': [(4, x.id) for x in move.route_ids], 'warehouse_id': 
move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False), 'priority': move.priority, } def _push_apply(self, cr, uid, moves, context=None): push_obj = self.pool.get("stock.location.path") for move in moves: #1) if the move is already chained, there is no need to check push rules #2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way # to receive goods without triggering the push rules again (which would duplicate chained operations) if not move.move_dest_id and not move.origin_returned_move_id: domain = [('location_from_id', '=', move.location_dest_id.id)] #priority goes to the route defined on the product and product category route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids] rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context) if not rules: #then we search on the warehouse if a rule can apply wh_route_ids = [] if move.warehouse_id: wh_route_ids = [x.id for x in move.warehouse_id.route_ids] elif move.picking_type_id and move.picking_type_id.warehouse_id: wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids] if wh_route_ids: rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context) if not rules: #if no specialized push rule has been found yet, we try to find a general one (without route) rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context) if rules: rule = push_obj.browse(cr, uid, rules[0], context=context) push_obj._apply(cr, uid, rule, move, context=context) return True def _create_procurement(self, cr, uid, move, context=None): """ This will create a procurement order """ return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), 
context=context) def _create_procurements(self, cr, uid, moves, context=None): res = [] for move in moves: res.append(self._create_procurement(cr, uid, move, context=context)) return res def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] # Check that we do not modify a stock.move which is done frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id']) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': if frozen_fields.intersection(vals): raise osv.except_osv(_('Operation Forbidden!'), _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).')) propagated_changes_dict = {} #propagation of quantity change if vals.get('product_uom_qty'): propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty'] if vals.get('product_uom_id'): propagated_changes_dict['product_uom_id'] = vals['product_uom_id'] #propagation of expected date: propagated_date_field = False if vals.get('date_expected'): #propagate any manual change of the expected date propagated_date_field = 'date_expected' elif (vals.get('state', '') == 'done' and vals.get('date')): #propagate also any delta observed when setting the move as done propagated_date_field = 'date' if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict): #any propagation is (maybe) needed for move in self.browse(cr, uid, ids, context=context): if move.move_dest_id and move.propagate: if 'date_expected' in propagated_changes_dict: propagated_changes_dict.pop('date_expected') if propagated_date_field: current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT) delta = new_date - current_date if 
abs(delta.days) >= move.company_id.propagation_minimum_delta: old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT) propagated_changes_dict['date_expected'] = new_move_date #For pushed moves as well as for pulled moves, propagate by recursive call of write(). #Note that, for pulled moves we intentionally don't propagate on the procurement. if propagated_changes_dict: self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context) return super(stock_move, self).write(cr, uid, ids, vals, context=context) def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_qty: Changed Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uos_qty': 0.00 } warning = {} if (not product_id) or (product_qty <= 0.0): result['product_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # Warn if the quantity was decreased if ids: for move in self.read(cr, uid, ids, ['product_qty']): if product_qty < move['product_qty']: warning.update({ 'title': _('Information'), 'message': _("By changing this quantity here, you accept the " "new quantity as complete: Odoo will not " "automatically generate a back order.")}) break if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product UoS') result['product_uos_qty'] = float_round(product_qty * uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uos_qty'] = product_qty return {'value': result, 'warning': 
warning} def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty, product_uos, product_uom): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_uos_qty: Changed UoS Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uom_qty': 0.00 } if (not product_id) or (product_uos_qty <= 0.0): result['product_uos_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # No warning if the quantity was decreased to avoid double warnings: # The clients should call onchange_quantity too anyway if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure') result['product_uom_qty'] = float_round(product_uos_qty / uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uom_qty'] = product_uos_qty return {'value': result} def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False): """ On change of product id, if finds UoM, UoS, quantity and UoS quantity. 
        @param prod_id: Changed Product id
        @param loc_id: Source location id
        @param loc_dest_id: Destination location id
        @param partner_id: Address id of partner
        @return: Dictionary of values
        """
        if not prod_id:
            return {}
        # prefer the partner's language over the user's for the product name
        user = self.pool.get('res.users').browse(cr, uid, uid)
        lang = user and user.lang or False
        if partner_id:
            addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
            if addr_rec:
                lang = addr_rec and addr_rec.lang or False
        ctx = {'lang': lang}

        product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
        uos_id = product.uos_id and product.uos_id.id or False
        result = {
            'name': product.partner_ref,
            'product_uom': product.uom_id.id,
            'product_uos': uos_id,
            'product_uom_qty': 1.00,
            # derive the UoS quantity for 1.00 unit via the quantity onchange
            'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
        }
        if loc_id:
            result['location_id'] = loc_id
        if loc_dest_id:
            result['location_dest_id'] = loc_dest_id
        return {'value': result}

    def _prepare_picking_assign(self, cr, uid, move, context=None):
        """ Prepares a new picking for this move as it could not be assigned to
        another picking. This method is designed to be inherited.

        :return: dict of values for stock.picking.create()
        """
        values = {
            'origin': move.origin,
            'company_id': move.company_id and move.company_id.id or False,
            'move_type': move.group_id and move.group_id.move_type or 'direct',
            'partner_id': move.partner_id.id or False,
            'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
        }
        return values

    @api.cr_uid_ids_context
    def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None):
        """Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to
        (and company). Those attributes are also given as parameters.
""" pick_obj = self.pool.get("stock.picking") # Use a SQL query as doing with the ORM will split it in different queries with id IN (,,) # In the next version, the locations on the picking should be stored again. query = """ SELECT stock_picking.id FROM stock_picking, stock_move WHERE stock_picking.state in ('draft', 'confirmed', 'waiting') AND stock_move.picking_id = stock_picking.id AND stock_move.location_id = %s AND stock_move.location_dest_id = %s AND """ params = (location_from, location_to) if not procurement_group: query += "stock_picking.group_id IS NULL LIMIT 1" else: query += "stock_picking.group_id = %s LIMIT 1" params += (procurement_group,) cr.execute(query, params) [pick] = cr.fetchone() or [None] if not pick: move = self.browse(cr, uid, move_ids, context=context)[0] values = self._prepare_picking_assign(cr, uid, move, context=context) pick = pick_obj.create(cr, uid, values, context=context) return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context) def onchange_date(self, cr, uid, ids, date, date_expected, context=None): """ On change of Scheduled Date gives a Move date. @param date_expected: Scheduled Date @param date: Move Date @return: Move Date """ if not date_expected: date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) return {'value': {'date': date_expected}} def attribute_price(self, cr, uid, move, context=None): """ Attribute price to move, important in inter-company moves or receipts with only one partner """ if not move.price_unit: price = move.product_id.standard_price self.write(cr, uid, [move.id], {'price_unit': price}) def action_confirm(self, cr, uid, ids, context=None): """ Confirms stock move or put it in waiting if it's linked to another move. @return: List of ids. 
""" if not context: context = {} if isinstance(ids, (int, long)): ids = [ids] states = { 'confirmed': [], 'waiting': [] } to_assign = {} for move in self.browse(cr, uid, ids, context=context): self.attribute_price(cr, uid, move, context=context) state = 'confirmed' #if the move is preceeded, then it's waiting (if preceeding move is done, then action_assign has been called already and its state is already available) if move.move_orig_ids: state = 'waiting' #if the move is split and some of the ancestor was preceeded, then it's waiting as well elif move.split_from: move2 = move.split_from while move2 and state != 'waiting': if move2.move_orig_ids: state = 'waiting' move2 = move2.split_from states[state].append(move.id) if not move.picking_id and move.picking_type_id: key = (move.group_id.id, move.location_id.id, move.location_dest_id.id) if key not in to_assign: to_assign[key] = [] to_assign[key].append(move.id) moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order'] self._create_procurements(cr, uid, moves, context=context) for move in moves: states['waiting'].append(move.id) states['confirmed'].remove(move.id) for state, write_ids in states.items(): if len(write_ids): self.write(cr, uid, write_ids, {'state': state}) #assign picking in batch for all confirmed move that share the same details for key, move_ids in to_assign.items(): procurement_group, location_from, location_to = key self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context) moves = self.browse(cr, uid, ids, context=context) self._push_apply(cr, uid, moves, context=context) return ids def force_assign(self, cr, uid, ids, context=None): """ Changes the state to assigned. 
        @return: True
        """
        return self.write(cr, uid, ids, {'state': 'assigned'}, context=context)

    def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None):
        # Decide, from the product's tracking flags and the locations involved,
        # whether a lot/serial number is mandatory; raise when it is missing.
        check = False
        if product.track_all and not location_dest.usage == 'inventory':
            check = True
        elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal':
            check = True
        elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal':
            check = True
        if check and not lot_id:
            raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name))

    def check_tracking(self, cr, uid, move, lot_id, context=None):
        """ Checks if serial number is assigned to stock move or not and raise an error if it had to.
        """
        self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context)

    def action_assign(self, cr, uid, ids, context=None):
        """ Checks the product type and accordingly writes the state.
""" context = context or {} quant_obj = self.pool.get("stock.quant") to_assign_moves = [] main_domain = {} todo_moves = [] operations = set() for move in self.browse(cr, uid, ids, context=context): if move.state not in ('confirmed', 'waiting', 'assigned'): continue if move.location_id.usage in ('supplier', 'inventory', 'production'): to_assign_moves.append(move.id) #in case the move is returned, we want to try to find quants before forcing the assignment if not move.origin_returned_move_id: continue if move.product_id.type == 'consu': to_assign_moves.append(move.id) continue else: todo_moves.append(move) #we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)] #if the move is preceeded, restrict the choice of quants in the ones moved previously in original move ancestors = self.find_move_ancestors(cr, uid, move, context=context) if move.state == 'waiting' and not ancestors: #if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock main_domain[move.id] += [('id', '=', False)] elif ancestors: main_domain[move.id] += [('history_ids', 'in', ancestors)] #if the move is returned from another, restrict the choice of quants to the ones that follow the returned move if move.origin_returned_move_id: main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)] for link in move.linked_move_operation_ids: operations.add(link.operation_id) # Check all ops and sort them: we want to process first the packages, then operations with lot then the rest operations = list(operations) operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) for ops in operations: #first try to find quants based on specific domains given by linked operations for record in ops.linked_move_operation_ids: move = record.move_id if 
move.id in main_domain: domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context) qty = record.qty if qty: quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, record, context=context) for move in todo_moves: if move.linked_move_operation_ids: continue #then if the move isn't totally assigned, try to find quants without any specific domain if move.state != 'assigned': qty_already_assigned = move.reserved_availability qty = move.product_qty - qty_already_assigned quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, context=context) #force assignation of consumable products and incoming from supplier/inventory/production if to_assign_moves: self.force_assign(cr, uid, to_assign_moves, context=context) def action_cancel(self, cr, uid, ids, context=None): """ Cancels the moves and if all moves are cancelled it cancels the picking. 
        @return: True
        """
        procurement_obj = self.pool.get('procurement.order')
        context = context or {}
        procs_to_check = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                raise osv.except_osv(_('Operation Forbidden!'),
                        _('You cannot cancel a stock move that has been set to \'Done\'.'))
            # Release any reserved quants before cancelling
            if move.reserved_quant_ids:
                self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
            if context.get('cancel_procurement'):
                if move.propagate:
                    procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
                    procurement_obj.cancel(cr, uid, procurement_ids, context=context)
            else:
                if move.move_dest_id:
                    if move.propagate:
                        # Propagate the cancellation down the chain
                        self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
                    elif move.move_dest_id.state == 'waiting':
                        #If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
                        self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
                if move.procurement_id:
                    # Does the same as procurement check, only eliminating a refresh
                    procs_to_check.append(move.procurement_id.id)

        res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
        if procs_to_check:
            procurement_obj.check(cr, uid, procs_to_check, context=context)
        return res

    def _check_package_from_moves(self, cr, uid, ids, context=None):
        # Collect the non-empty packages touched by these moves' quants and
        # delegate the location-consistency check to stock.quant.package.
        pack_obj = self.pool.get("stock.quant.package")
        packs = set()
        for move in self.browse(cr, uid, ids, context=context):
            packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
        return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)

    def find_move_ancestors(self, cr, uid, move, context=None):
        '''Find the first level ancestors of given move '''
        ancestors = []
        move2 = move
        while move2:
            ancestors += [x.id for x in move2.move_orig_ids]
            #loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to them)
            move2 = not move2.move_orig_ids and move2.split_from or False
        return ancestors

    @api.cr_uid_ids_context
    def recalculate_move_state(self, cr, uid, move_ids, context=None):
        '''Recompute the state of moves given because their reserved quants were used to fulfill another operation'''
        for move in self.browse(cr, uid, move_ids, context=context):
            vals = {}
            reserved_quant_ids = move.reserved_quant_ids
            # Keep the partially_available flag in sync with the actual reservations
            if len(reserved_quant_ids) > 0 and not move.partially_available:
                vals['partially_available'] = True
            if len(reserved_quant_ids) == 0 and move.partially_available:
                vals['partially_available'] = False
            if move.state == 'assigned':
                # An assigned move that lost its reservation falls back to waiting
                # (if chained) or confirmed (if standalone)
                if self.find_move_ancestors(cr, uid, move, context=context):
                    vals['state'] = 'waiting'
                else:
                    vals['state'] = 'confirmed'
            if vals:
                self.write(cr, uid, [move.id], vals, context=context)

    def action_done(self, cr, uid, ids, context=None):
        """ Process completely the moves given as ids and if all moves are done, it will finish the picking.
        """
        context = context or {}
        picking_obj = self.pool.get("stock.picking")
        quant_obj = self.pool.get("stock.quant")
        # Draft moves are confirmed first so the whole set is processable
        todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
        if todo:
            ids = self.action_confirm(cr, uid, todo, context=context)
        pickings = set()
        procurement_ids = set()
        #Search operations that are linked to the moves
        operations = set()
        move_qty = {}   # remaining quantity to process per move id
        for move in self.browse(cr, uid, ids, context=context):
            move_qty[move.id] = move.product_qty
            for link in move.linked_move_operation_ids:
                operations.add(link.operation_id)
        #Sort operations according to entire packages first, then package + lot, package only, lot only
        operations = list(operations)
        operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
        for ops in operations:
            if ops.picking_id:
                pickings.add(ops.picking_id.id)
            main_domain = [('qty', '>', 0)]
            for record in ops.linked_move_operation_ids:
                move = record.move_id
                self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context)
                # Preference order for quants: reserved for this move, then free,
                # then reserved for other moves
                prefered_domain = [('reservation_id', '=', move.id)]
                fallback_domain = [('reservation_id', '=', False)]
                fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
                prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
                dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
                quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                if ops.product_id:
                    #If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
                    quant_dest_package_id = ops.result_package_id.id
                    ctx = context
                else:
                    # When a pack is moved entirely, the quants should not be written anything for the destination package
                    quant_dest_package_id = False
                    ctx = context.copy()
                    ctx['entire_pack'] = True
                quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx)

                # Handle pack in pack
                if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
                    self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
                if not move_qty.get(move.id):
                    raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
                move_qty[move.id] -= record.qty
        #Check for remaining qtys and unreserve/check move_dest_id in
        move_dest_ids = set()
        for move in self.browse(cr, uid, ids, context=context):
            move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
            if move_qty_cmp > 0:  # (=In case no pack operations in picking)
                main_domain = [('qty', '>', 0)]
                prefered_domain = [('reservation_id', '=', move.id)]
                fallback_domain = [('reservation_id', '=', False)]
                fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
                prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
                self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context)
                qty = move_qty[move.id]
                quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)

            # If the move has a destination, add it to the list to reserve
            if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
                move_dest_ids.add(move.move_dest_id.id)

            if move.procurement_id:
                procurement_ids.add(move.procurement_id.id)

            #unreserve the quants and make them available for other operations/moves
            quant_obj.quants_unreserve(cr, uid, move, context=context)
        # Check the packages have been placed in the correct locations
        self._check_package_from_moves(cr, uid, ids, context=context)
        #set the move as done
        self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
        #assign destination moves
        if move_dest_ids:
            self.action_assign(cr, uid, list(move_dest_ids), context=context)
        #check picking state to set the date_done is needed
        done_picking = []
        for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
            if picking.state == 'done' and not picking.date_done:
                done_picking.append(picking.id)
        if done_picking:
            picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        return True

    def unlink(self, cr, uid, ids, context=None):
        # Only draft or cancelled moves may be deleted
        context = context or {}
        for move in self.browse(cr, uid, ids, context=context):
            if move.state not in ('draft', 'cancel'):
                raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
        return super(stock_move, self).unlink(cr, uid, ids, context=context)

    def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """ Move the scrap/damaged product into scrap location
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be scrapped
        @param quantity : specify scrap qty
        @param location_id : specify scrap location
        @param context: context arguments
        @return: Scraped lines
        """
        quant_obj = self.pool.get("stock.quant")
        #quantity should be given in MOVE UOM
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            source_location = move.location_id
            if move.state == 'done':
                source_location = move.location_dest_id
            #Previously used to prevent scraping from virtual location but not necessary anymore
            #if source_location.usage != 'internal':
                #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
                #raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
            move_qty = move.product_qty
            # NOTE(review): divides by move.product_qty — presumably never 0 here; verify against callers
            uos_qty = quantity / move_qty * move.product_uos_qty
            default_val = {
                'location_id': source_location.id,
                'product_uom_qty': quantity,
                'product_uos_qty': uos_qty,
                'state': move.state,
                'scrapped': True,
                'location_dest_id': location_id,
                'restrict_lot_id': restrict_lot_id,
                'restrict_partner_id': restrict_partner_id,
            }
            new_move = self.copy(cr, uid, move.id, default_val)

            res += [new_move]
            product_obj = self.pool.get('product.product')
            for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
                if move.picking_id:
                    uom = product.uom_id.name if product.uom_id else ''
                    message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
                    move.picking_id.message_post(body=message)

            # We "flag" the quant from which we want to scrap the products. To do so:
            #    - we select the quants related to the move we scrap from
            #    - we reserve the quants with the scrapped move
            # See self.action_done, et particularly how is defined the "prefered_domain" for clarification
            scrap_move = self.browse(cr, uid, new_move, context=context)
            if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
                domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
                # We use scrap_move data since a reservation makes sense for a move not already done
                quants = quant_obj.quants_get_prefered_domain(cr, uid, scrap_move.location_id, scrap_move.product_id, quantity, domain=domain, prefered_domain_list=[], restrict_lot_id=scrap_move.restrict_lot_id.id, restrict_partner_id=scrap_move.restrict_partner_id.id, context=context)
                quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
        self.action_done(cr, uid, res, context=context)
        return res

    def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """ Splits qty from move move into a new move
        :param move: browse record
        :param qty: float. quantity to split (given in product UoM)
        :param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
        :param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionay. can contains the special key 'source_location_id' in order to force the source location when copying the move

        returns the ID of the backorder move created
        """
        if move.state in ('done', 'cancel'):
            raise osv.except_osv(_('Error'), _('You cannot split a move done'))
        if move.state == 'draft':
            #we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in
            #case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode.
            raise osv.except_osv(_('Error'), _('You cannot split a draft move. It needs to be confirmed first.'))

        # Nothing to split when the requested qty covers the whole move or is zero
        if move.product_qty <= qty or qty == 0:
            return move.id

        uom_obj = self.pool.get('product.uom')
        context = context or {}

        #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
        uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
        uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty

        defaults = {
            'product_uom_qty': uom_qty,
            'product_uos_qty': uos_qty,
            'procure_method': 'make_to_stock',
            'restrict_lot_id': restrict_lot_id,
            'restrict_partner_id': restrict_partner_id,
            'split_from': move.id,
            'procurement_id': move.procurement_id.id,
            'move_dest_id': move.move_dest_id.id,
            'origin_returned_move_id': move.origin_returned_move_id.id,
        }
        if context.get('source_location_id'):
            defaults['location_id'] = context['source_location_id']
        new_move = self.copy(cr, uid, move.id, defaults, context=context)

        # Shrink the original move without propagating the change down the chain
        ctx = context.copy()
        ctx['do_not_propagate'] = True
        self.write(cr, uid, [move.id], {
            'product_uom_qty': move.product_uom_qty - uom_qty,
            'product_uos_qty': move.product_uos_qty - uos_qty,
        }, context=ctx)

        # Propagate the split to the destination move so the chain stays aligned
        if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
            new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
            self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
        #returning the first element of list returned by action_confirm is ok because we checked it wouldn't be exploded (and
        #thus the result of action_confirm should always be a list of 1 element length)
        return self.action_confirm(cr, uid, [new_move], context=context)[0]

    def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
        """
        Returns the code the picking type should have. This can easily be used
        to check if a move is internal or not
        move, location_id and location_dest_id are browse records
        """
        code = 'internal'
        src_loc = location_id or move.location_id
        dest_loc = location_dest_id or move.location_dest_id
        if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
            code = 'outgoing'
        if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
            code = 'incoming'
        return code

    def _get_taxes(self, cr, uid, move, context=None):
        # Hook: no taxes on stock moves by default; meant to be overridden
        return []

class stock_inventory(osv.osv):
    _name = "stock.inventory"
    _description = "Inventory"

    def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
        # Function field: True when the inventory has generated stock moves
        res = {}
        for inv in self.browse(cr, uid, ids, context=context):
            res[inv.id] = False
            if inv.move_ids:
                res[inv.id] = True
        return res

    def _get_available_filters(self, cr, uid, context=None):
        """
           This function will return the list of filter allowed according to the options checked
           in 'Settings\Warehouse'.
:rtype: list of tuple """ #default available choices res_filter = [('none', _('All products')), ('partial', _('Manual Selection of Products')), ('product', _('One product only'))] settings_obj = self.pool.get('stock.config.settings') config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context) #If we don't have updated config until now, all fields are by default false and so should be not dipslayed if not config_ids: return res_filter stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context) if stock_settings.group_stock_tracking_owner: res_filter.append(('owner', _('One owner only'))) res_filter.append(('product_owner', _('One product for a specific owner'))) if stock_settings.group_stock_production_lot: res_filter.append(('lot', _('One Lot/Serial Number'))) if stock_settings.group_stock_tracking_lot: res_filter.append(('pack', _('A Pack'))) return res_filter def _get_total_qty(self, cr, uid, ids, field_name, args, context=None): res = {} for inv in self.browse(cr, uid, ids, context=context): res[inv.id] = sum([x.product_qty for x in inv.line_ids]) return res INVENTORY_STATE_SELECTION = [ ('draft', 'Draft'), ('cancel', 'Cancelled'), ('confirm', 'In Progress'), ('done', 'Validated'), ] _columns = { 'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."), 'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."), 'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True), 'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}), 'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', 
readonly=True, select=True, copy=False), 'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}), 'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."), 'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."), 'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."), 'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False), 'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'), 'filter': fields.selection(_get_available_filters, 'Inventory of', required=True, help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\ "(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\ "system propose for a single product / lot /... 
"), 'total_qty': fields.function(_get_total_qty, type="float"), } def _default_stock_location(self, cr, uid, context=None): try: warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0') return warehouse.lot_stock_id.id except: return False _defaults = { 'date': fields.datetime.now, 'state': 'draft', 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c), 'location_id': _default_stock_location, 'filter': 'none', } def reset_real_qty(self, cr, uid, ids, context=None): inventory = self.browse(cr, uid, ids[0], context=context) line_ids = [line.id for line in inventory.line_ids] self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0}) return True def action_done(self, cr, uid, ids, context=None): """ Finish the inventory @return: True """ for inv in self.browse(cr, uid, ids, context=context): for inventory_line in inv.line_ids: if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty: raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s' % (inventory_line.product_id.name, inventory_line.product_qty))) self.action_check(cr, uid, [inv.id], context=context) self.write(cr, uid, [inv.id], {'state': 'done'}, context=context) self.post_inventory(cr, uid, inv, context=context) return True def post_inventory(self, cr, uid, inv, context=None): #The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory #as they will be moved to inventory loss, and other quants will be created to the encoded quant location. This is a normal behavior #as quants cannot be reuse from inventory location (users can still manually move the products before/after the inventory if they want). 
move_obj = self.pool.get('stock.move') move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context) def action_check(self, cr, uid, ids, context=None): """ Checks the inventory and computes the stock move to do @return: True """ inventory_line_obj = self.pool.get('stock.inventory.line') stock_move_obj = self.pool.get('stock.move') for inventory in self.browse(cr, uid, ids, context=context): #first remove the existing stock moves linked to this inventory move_ids = [move.id for move in inventory.move_ids] stock_move_obj.unlink(cr, uid, move_ids, context=context) for line in inventory.line_ids: #compare the checked quantities on inventory lines to the theorical one stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context) def action_cancel_draft(self, cr, uid, ids, context=None): """ Cancels the stock move and change inventory state to draft. @return: True """ for inv in self.browse(cr, uid, ids, context=context): self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context) self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context) self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context) return True def action_cancel_inventory(self, cr, uid, ids, context=None): self.action_cancel_draft(cr, uid, ids, context=context) def prepare_inventory(self, cr, uid, ids, context=None): inventory_line_obj = self.pool.get('stock.inventory.line') for inventory in self.browse(cr, uid, ids, context=context): # If there are inventory lines already (e.g. 
            # from import), respect those and set their theoretical qty
            line_ids = [line.id for line in inventory.line_ids]
            if not line_ids and inventory.filter != 'partial':
                #compute the inventory lines and create them
                vals = self._get_inventory_lines(cr, uid, inventory, context=context)
                for product_line in vals:
                    inventory_line_obj.create(cr, uid, product_line, context=context)
        return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})

    def _get_inventory_lines(self, cr, uid, inventory, context=None):
        """Build the value dicts of inventory lines from the quants matching the inventory's filters (location subtree, then optional owner/lot/product/package)."""
        location_obj = self.pool.get('stock.location')
        product_obj = self.pool.get('product.product')
        location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
        # The WHERE clause is assembled from fixed SQL fragments only; all values are
        # passed as query parameters, so no injection risk from user data.
        domain = ' location_id in %s'
        args = (tuple(location_ids),)
        if inventory.partner_id:
            domain += ' and owner_id = %s'
            args += (inventory.partner_id.id,)
        if inventory.lot_id:
            domain += ' and lot_id = %s'
            args += (inventory.lot_id.id,)
        if inventory.product_id:
            domain += ' and product_id = %s'
            args += (inventory.product_id.id,)
        if inventory.package_id:
            domain += ' and package_id = %s'
            args += (inventory.package_id.id,)
        cr.execute('''
           SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
           FROM stock_quant
           WHERE''' + domain + '''
           GROUP BY product_id, location_id, lot_id, package_id, partner_id
        ''', args)
        vals = []
        for product_line in cr.dictfetchall():
            #replace the None the dictionary by False, because falsy values are tested later on
            for key, value in product_line.items():
                if not value:
                    product_line[key] = False
            product_line['inventory_id'] = inventory.id
            product_line['theoretical_qty'] = product_line['product_qty']
            if product_line['product_id']:
                product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
                product_line['product_uom_id'] = product.uom_id.id
            vals.append(product_line)
        return vals

    def _check_filter_product(self, cr, uid, ids, context=None):
        """Constraint (see _constraints below): filter fields must be coherent with the chosen filter."""
        for inventory in self.browse(cr, uid, ids, context=context):
            # NOTE(review): this first branch accepts product+location+lot with filter 'none'
            # and short-circuits the remaining checks — looks intentional but worth confirming.
            if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
                return True
            if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
                return False
            if inventory.filter != 'lot' and inventory.lot_id:
                return False
            if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
                return False
            if inventory.filter != 'pack' and inventory.package_id:
                return False
        return True

    def onchange_filter(self, cr, uid, ids, filter, context=None):
        """Clear the filter-dependent fields that are not relevant for the newly selected filter."""
        to_clean = {'value': {}}
        if filter not in ('product', 'product_owner'):
            to_clean['value']['product_id'] = False
        if filter != 'lot':
            to_clean['value']['lot_id'] = False
        if filter not in ('owner', 'product_owner'):
            to_clean['value']['partner_id'] = False
        if filter != 'pack':
            to_clean['value']['package_id'] = False
        return to_clean

    _constraints = [
        (_check_filter_product, 'The selected inventory options are not coherent.', ['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
    ]


class stock_inventory_line(osv.osv):
    _name = "stock.inventory.line"
    _description = "Inventory Line"
    _order = "inventory_id, location_name, product_code, product_name, prodlot_name"

    def _get_product_name_change(self, cr, uid, ids, context=None):
        # store trigger: inventory lines to recompute when a product's name/code changes
        return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)

    def _get_location_change(self, cr, uid, ids, context=None):
        # store trigger: inventory lines to recompute when a location changes
        return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)

    def _get_prodlot_change(self, cr, uid, ids, context=None):
        # store trigger: inventory lines to recompute when a production lot changes
        return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)

    def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
        """Function field: quantity currently held by the quants matching the line, expressed in the line's UoM."""
        res = {}
        quant_obj = self.pool["stock.quant"]
        uom_obj = self.pool["product.uom"]
        for line in self.browse(cr, uid, ids, context=context):
            quant_ids = self._get_quants(cr, uid, line,
                                         context=context)
            quants = quant_obj.browse(cr, uid, quant_ids, context=context)
            tot_qty = sum([x.qty for x in quants])
            if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
                # convert from the product's reference UoM to the UoM chosen on the line
                tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
            res[line.id] = tot_qty
        return res

    _columns = {
        'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
        'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        # quantity physically counted by the user
        'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
        'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
        'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
        # quantity the system expects, recomputed when any filter-relevant field changes
        'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
                                           store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
                                           readonly=True, string="Theoretical Quantity"),
        'partner_id': fields.many2one('res.partner', 'Owner'),
        # denormalized names below exist only to make _order work on related records
        'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
                                            'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
        'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
                                            'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
        'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
                                            'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
        'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
                                            'stock.production.lot': (_get_prodlot_change, ['name'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
    }

    _defaults = {
        'product_qty': 0,
        'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
    }

    def _get_quants(self, cr, uid, line, context=None):
        """Return the ids of the quants matching this line exactly (company, location, lot, product, owner, package)."""
        quant_obj = self.pool["stock.quant"]
        dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
               ('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
        quants = quant_obj.search(cr, uid, dom, context=context)
        return quants

    def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
        """On-change helper for a new line: fix an incompatible UoM and prefill both quantities with the theoretical one."""
        quant_obj = self.pool["stock.quant"]
        uom_obj = self.pool["product.uom"]
        res = {'value': {}}
        # If no UoM already put the default UoM of the product
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
            if product.uom_id.category_id.id != uom.category_id.id:
                res['value']['product_uom_id'] = product.uom_id.id
                res['domain'] =
                {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
                uom_id = product.uom_id.id
        # Calculate theoretical quantity by searching the quants as in quants_get
        if product_id and location_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            if not company_id:
                company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
            dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
                   ('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
            quants = quant_obj.search(cr, uid, dom, context=context)
            th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
            if product_id and uom_id and product.uom_id.id != uom_id:
                # convert from the product's reference UoM to the UoM selected on the line
                th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
            res['value']['theoretical_qty'] = th_qty
            res['value']['product_qty'] = th_qty
        return res

    def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
        """Create the stock move correcting the gap between theoretical and checked quantity.

        Returns the created move id, or None when both quantities already match.
        """
        stock_move_obj = self.pool.get('stock.move')
        quant_obj = self.pool.get('stock.quant')
        diff = inventory_line.theoretical_qty - inventory_line.product_qty
        if not diff:
            # nothing to correct for this line
            return
        #each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move
        vals = {
            'name': _('INV:') + (inventory_line.inventory_id.name or ''),
            'product_id': inventory_line.product_id.id,
            'product_uom': inventory_line.product_uom_id.id,
            'date': inventory_line.inventory_id.date,
            'company_id': inventory_line.inventory_id.company_id.id,
            'inventory_id': inventory_line.inventory_id.id,
            'state': 'confirmed',
            'restrict_lot_id': inventory_line.prod_lot_id.id,
            'restrict_partner_id': inventory_line.partner_id.id,
        }
        inventory_location_id = inventory_line.product_id.property_stock_inventory.id
        if diff < 0:
            #found more than expected
            vals['location_id'] = inventory_location_id
            vals['location_dest_id'] = inventory_line.location_id.id
            vals['product_uom_qty'] = -diff
        else:
            #found less than expected
            vals['location_id'] = inventory_line.location_id.id
            vals['location_dest_id'] = inventory_location_id
            vals['product_uom_qty'] = diff
        move_id = stock_move_obj.create(cr, uid, vals, context=context)
        move = stock_move_obj.browse(cr, uid, move_id, context=context)
        if diff > 0:
            # shortage: reserve matching quants, preferring unreserved ones, then ones
            # reserved by a different inventory
            domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
            preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
            quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, move.product_qty, domain=domain, prefered_domain_list=preferred_domain_list, restrict_partner_id=move.restrict_partner_id.id, context=context)
            quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        elif inventory_line.package_id:
            # surplus found inside a package: process the move right away and put the
            # resulting quants into the pack
            stock_move_obj.action_done(cr, uid, move_id, context=context)
            quants = [x.id for x in move.quant_ids]
            quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context)
            res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id), ('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
            if res:
                for quant in move.quant_ids:
                    if quant.location_id.id == move.location_dest_id.id:
                        #To avoid we take a quant that was reconcile already
                        quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
        return move_id

    # Should be left out in next version
    def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
        return {}

    # Should be left out in next version
    def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
        """ Changes UoM

        @param location_id: Location id
        @param product: Changed product_id
        @param uom: UoM product
        @return: Dictionary of changed values
        """
        if not product:
            return {'value': {'product_uom_id': False}}
        obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
        return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}


#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
    _name = "stock.warehouse"
    _description = "Warehouse"

    _columns = {
        'name': fields.char('Warehouse Name', required=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
        'partner_id': fields.many2one('res.partner', 'Address'),
        'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
        'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
        'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Defaults routes through the warehouse'),
        'reception_steps': fields.selection([
            ('one_step', 'Receive goods directly in stock (1 step)'),
            ('two_steps', 'Unload in input location then go to stock (2 steps)'),
            ('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments', help="Default incoming route to follow", required=True),
        'delivery_steps': fields.selection([
            ('ship_only', 'Ship directly from stock (Ship only)'),
            ('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shippings', help="Default outgoing route to follow", required=True),
        'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
        'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
        'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
        'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
        'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
        'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
        'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
        'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
        'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
        'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
        'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
        'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
        'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
        'resupply_from_wh': fields.boolean('Resupply From Other Warehouses', help='Unused field'),
        'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
        'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes', help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
        'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
    }

    def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
        """On-change: keep the default resupply warehouse inside the resupply warehouse list."""
        resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
        if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
            resupply_wh_ids.add(default_resupply_wh_id)
        resupply_wh_ids = list(resupply_wh_ids)
        return {'value': {'resupply_wh_ids': resupply_wh_ids}}

    def _get_external_transit_location(self, cr, uid, warehouse, context=None):
        ''' returns browse record of inter company transit location, if found'''
        data_obj = self.pool.get('ir.model.data')
        location_obj = self.pool.get('stock.location')
        try:
            inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
        except:
            # reference data not installed: no inter-company transit location
            return False
        return location_obj.browse(cr, uid, inter_wh_loc, context=context)

    def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
        """Values of the route letting `warehouse` be resupplied from warehouse `wh`."""
        return {
            'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
            'warehouse_selectable': False,
            'product_selectable': True,
            'product_categ_selectable': True,
            'supplied_wh_id': warehouse.id,
            'supplier_wh_id': wh.id,
        }

    def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
        """Create, for each supplier warehouse, the route and pull rules resupplying `warehouse` through a transit location."""
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        #create route selectable on the product to resupply the warehouse from another one
        external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
        internal_transit_location = warehouse.company_id.internal_transit_location_id
        input_loc = warehouse.wh_input_stock_loc_id
        if warehouse.reception_steps == 'one_step':
            input_loc = warehouse.lot_stock_id
        for wh in supplier_warehouses:
            # same company -> internal transit; different company -> inter-company transit
            transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
            if transit_location:
                output_loc = wh.wh_output_stock_loc_id
                if wh.delivery_steps == 'ship_only':
                    output_loc = wh.lot_stock_id
                    # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists)
                    mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
                    pull_obj.create(cr, uid, mto_pull_vals, context=context)
                inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
                inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
                values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
                pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
                for pull_rule in pull_rules_list:
                    pull_obj.create(cr, uid, vals=pull_rule, context=context)
                #if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
                if default_resupply_wh and default_resupply_wh.id == wh.id:
                    self.write(cr, uid, [warehouse.id, wh.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)

    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
        'reception_steps': 'one_step',
        'delivery_steps': 'ship_only',
    }
    _sql_constraints = [
        ('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
        ('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
    ]

    def _get_partner_locations(self, cr, uid, ids, context=None):
        ''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
        data_obj = self.pool.get('ir.model.data')
        location_obj = self.pool.get('stock.location')
        try:
            customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
            supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
        except:
            # reference data missing: fall back to the first location of each usage
            customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
            customer_loc = customer_loc and customer_loc[0] or False
            supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
            supplier_loc = supplier_loc and supplier_loc[0] or False
        if not (customer_loc and
                supplier_loc):
            raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.'))
        return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)

    def _location_used(self, cr, uid, location_id, warehouse, context=None):
        """True when a pull or push rule outside the warehouse's own routes references the location (it must then stay active)."""
        pull_obj = self.pool['procurement.rule']
        push_obj = self.pool['stock.location.path']
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_src_id', '=', location_id), ('location_id', '=', location_id)], context=context)
        pushs = push_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_from_id', '=', location_id), ('location_dest_id', '=', location_id)], context=context)
        if pulls or pushs:
            return True
        return False

    def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """(De)activate the input/QC/output/packing locations according to the new reception and delivery steps."""
        location_obj = self.pool.get('stock.location')
        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps
        if warehouse.reception_steps != new_reception_step:
            # only archive locations no external rule still relies on
            if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
            if new_reception_step != 'one_step':
                location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
            if new_reception_step == 'three_steps':
                location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
        if warehouse.delivery_steps != new_delivery_step:
            if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
            if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
            if new_delivery_step != 'ship_only':
                location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
            if new_delivery_step == 'pick_pack_ship':
                location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
        return True

    def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
        """Values of a reception or delivery route for this warehouse."""
        return {
            'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
            'product_categ_selectable': True,
            'product_selectable': False,
            'sequence': 10,
        }

    def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
        """Build pull-rule value dicts for an inter-warehouse resupply route."""
        pull_rules_list = []
        for from_loc, dest_loc, pick_type_id, warehouse in values:
            pull_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': new_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resuply route is MTS
                'warehouse_id': warehouse.id,
                'propagate_warehouse_id': supply_warehouse,
            })
        return pull_rules_list

    def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
        """Build matching push and pull rule value dicts for each (from, dest, picking type) step of a route."""
        first_rule = True
        push_rules_list = []
        pull_rules_list = []
        for from_loc, dest_loc, pick_type_id in values:
            push_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_from_id': from_loc.id,
                'location_dest_id': dest_loc.id,
                'route_id': new_route_id,
                'auto': 'manual',
                'picking_type_id': pick_type_id,
                'active': active,
                'warehouse_id': warehouse.id,
            })
            pull_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': new_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                # only the first step of the chain pulls from stock; the rest are MTO
                'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
                'active': active,
                'warehouse_id': warehouse.id,
            })
            first_rule = False
        return push_rules_list, pull_rules_list

    def _get_mto_route(self, cr, uid, context=None):
        """Return the id of the generic Make To Order route, searching by name as a fallback."""
        route_obj = self.pool.get('stock.location.route')
        data_obj = self.pool.get('ir.model.data')
        try:
            mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
        except:
            mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
            mto_route_id = mto_route_id and mto_route_id[0] or False
        if not mto_route_id:
            raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.'))
        return mto_route_id

    def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
        """Remove the pull rules going from the warehouse stock to a transit location."""
        pull_obj = self.pool.get('procurement.rule')
        # NOTE(review): mto_route_id is computed but not used below — looks vestigial.
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
        pull_obj.unlink(cr, uid, rules, context=context)

    def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
        """Build MTO pull-rule value dicts attached to the generic MTO route."""
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        res = []
        for value in values:
            from_loc, dest_loc, pick_type_id = value
            res += [{
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': mto_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                'procure_method': 'make_to_order',
                'active': True,
                'warehouse_id': warehouse.id,
            }]
        return res

    def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
        """Values of the cross-dock route; only active when both reception and delivery are multi-step."""
        return {
            'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
            'warehouse_selectable': False,
            'product_selectable': True,
            'product_categ_selectable': True,
            'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
            'sequence': 20,
        }

    def create_routes(self, cr, uid, ids, warehouse, context=None):
        """Create the reception, delivery, MTO and cross-dock routes/rules of a warehouse; returns the values to store on it."""
        wh_route_ids = []
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #create reception route and rules
        route_name, values = routes_dict[warehouse.reception_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, reception_route_id))
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
        #create the push/pull rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTS route and pull rules for delivery and a specific route MTO to be set on the product
        route_name, values = routes_dict[warehouse.delivery_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        #create the route and its pull rules
        delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, delivery_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTO pull rule and link it to the generic MTO route
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
        #create a route for cross dock operations, that can be set on products and product categories
        route_name, values = routes_dict['crossdock']
        crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
        crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
        wh_route_ids.append((4, crossdock_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
        for pull_rule in pull_rules_list:
            # Fixed cross-dock is logically mto
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create route selectable on the product to resupply the warehouse from another one
        self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
        #return routes and mto pull rule to store on the warehouse
        return {
            'route_ids': wh_route_ids,
            'mto_pull_id': mto_pull_id,
            'reception_route_id': reception_route_id,
            'delivery_route_id': delivery_route_id,
            'crossdock_route_id': crossdock_route_id,
        }

    def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """Rewire picking types, routes and rules after the reception/delivery steps of a warehouse changed."""
        picking_type_obj = self.pool.get('stock.picking.type')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        route_obj = self.pool.get('stock.location.route')
        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps
        #change the default source and destination location and (de)activate picking types
        input_loc = warehouse.wh_input_stock_loc_id
        if new_reception_step == 'one_step':
            input_loc = warehouse.lot_stock_id
        output_loc = warehouse.wh_output_stock_loc_id
        if new_delivery_step == 'ship_only':
            output_loc = warehouse.lot_stock_id
        picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
            'active': new_delivery_step != 'ship_only',
            'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
        }, context=context)
        picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
        route_name, values = routes_dict[new_delivery_step]
        route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
        #create the pull rules
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
        push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
        route_name, values = routes_dict[new_reception_step]
        route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
        #create the push/pull rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        # cross-dock only makes sense when both directions are multi-step
        route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
        #change MTO rule
        dummy, values = routes_dict[new_delivery_step]
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
        return True

    def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
        """Create the numbering sequences and the in/out/internal/pack/pick picking types of a new warehouse."""
        seq_obj = self.pool.get('ir.sequence')
        picking_type_obj = self.pool.get('stock.picking.type')
        #create new sequences
        in_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
        out_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
        pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
        pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
        int_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
        wh_stock_loc =
warehouse.lot_stock_id wh_input_stock_loc = warehouse.wh_input_stock_loc_id wh_output_stock_loc = warehouse.wh_output_stock_loc_id wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id #fetch customer and supplier locations, for references customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context) #create in, out, internal picking types for warehouse input_loc = wh_input_stock_loc if warehouse.reception_steps == 'one_step': input_loc = wh_stock_loc output_loc = wh_output_stock_loc if warehouse.delivery_steps == 'ship_only': output_loc = wh_stock_loc #choose the next available color for the picking types of this warehouse color = 0 available_colors = [c%9 for c in range(3, 12)] # put flashy colors first all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color') #don't use sets to preserve the list order for x in all_used_colors: if x['color'] in available_colors: available_colors.remove(x['color']) if available_colors: color = available_colors[0] #order the picking types with a sequence allowing to have the following suit for each warehouse: reception, internal, pick, pack, ship. 
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc') max_sequence = max_sequence and max_sequence[0]['sequence'] or 0 in_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Receipts'), 'warehouse_id': warehouse.id, 'code': 'incoming', 'sequence_id': in_seq_id, 'default_location_src_id': supplier_loc.id, 'default_location_dest_id': input_loc.id, 'sequence': max_sequence + 1, 'color': color}, context=context) out_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Delivery Orders'), 'warehouse_id': warehouse.id, 'code': 'outgoing', 'sequence_id': out_seq_id, 'return_picking_type_id': in_type_id, 'default_location_src_id': output_loc.id, 'default_location_dest_id': customer_loc.id, 'sequence': max_sequence + 4, 'color': color}, context=context) picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context) int_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Internal Transfers'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': int_seq_id, 'default_location_src_id': wh_stock_loc.id, 'default_location_dest_id': wh_stock_loc.id, 'active': True, 'sequence': max_sequence + 2, 'color': color}, context=context) pack_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Pack'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': pack_seq_id, 'default_location_src_id': wh_pack_stock_loc.id, 'default_location_dest_id': output_loc.id, 'active': warehouse.delivery_steps == 'pick_pack_ship', 'sequence': max_sequence + 3, 'color': color}, context=context) pick_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Pick'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': pick_seq_id, 'default_location_src_id': wh_stock_loc.id, 'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id, 'active': warehouse.delivery_steps != 'ship_only', 
'sequence': max_sequence + 2, 'color': color}, context=context) #write picking types on WH vals = { 'in_type_id': in_type_id, 'out_type_id': out_type_id, 'pack_type_id': pack_type_id, 'pick_type_id': pick_type_id, 'int_type_id': int_type_id, } super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context) def create(self, cr, uid, vals, context=None): if context is None: context = {} if vals is None: vals = {} data_obj = self.pool.get('ir.model.data') seq_obj = self.pool.get('ir.sequence') picking_type_obj = self.pool.get('stock.picking.type') location_obj = self.pool.get('stock.location') #create view location for warehouse loc_vals = { 'name': _(vals.get('code')), 'usage': 'view', 'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1], } if vals.get('company_id'): loc_vals['company_id'] = vals.get('company_id') wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context) vals['view_location_id'] = wh_loc_id #create all location def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'}) reception_steps = vals.get('reception_steps', def_values['reception_steps']) delivery_steps = vals.get('delivery_steps', def_values['delivery_steps']) context_with_inactive = context.copy() context_with_inactive['active_test'] = False sub_locations = [ {'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'}, {'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'}, {'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'}, {'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'}, {'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'}, ] for values in sub_locations: loc_vals = { 'name': values['name'], 'usage': 'internal', 'location_id': wh_loc_id, 'active': values['active'], } if vals.get('company_id'): 
loc_vals['company_id'] = vals.get('company_id') location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive) vals[values['field']] = location_id #create WH new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context) warehouse = self.browse(cr, uid, new_id, context=context) self.create_sequences_and_picking_types(cr, uid, warehouse, context=context) #create routes and push/pull rules new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context) self.write(cr, uid, warehouse.id, new_objects_dict, context=context) return new_id def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None): return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name def _format_routename(self, cr, uid, obj, name, context=None): return obj.name + ': ' + name def get_routes_dict(self, cr, uid, ids, warehouse, context=None): #fetch customer and supplier locations, for references customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context) return { 'one_step': (_('Receipt in 1 step'), []), 'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]), 'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]), 'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), 'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]), 'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), 'pick_pack_ship': (_('Pick + Pack + Ship'), 
[(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), } def _handle_renaming(self, cr, uid, warehouse, name, code, context=None): location_obj = self.pool.get('stock.location') route_obj = self.pool.get('stock.location.route') pull_obj = self.pool.get('procurement.rule') push_obj = self.pool.get('stock.location.path') #rename location location_id = warehouse.lot_stock_id.location_id.id location_obj.write(cr, uid, location_id, {'name': code}, context=context) #rename route and push-pull rules for route in warehouse.route_ids: route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context) for pull in route.pull_ids: pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context) for push in route.push_ids: push_obj.write(cr, uid, push.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context) #change the mto pull rule name if warehouse.mto_pull_id.id: pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context) def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None): """ Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """ #Check routes that are being delivered by this warehouse and change the rule going to transit location route_obj = self.pool.get("stock.location.route") pull_obj = self.pool.get("procurement.rule") routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context) pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context) if pulls: pull_obj.write(cr, uid, pulls, {'location_src_id': 
new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context) # Create or clean MTO rules mto_route_id = self._get_mto_route(cr, uid, context=context) if not change_to_multiple: # If single delivery we should create the necessary MTO rules for the resupply # pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context) pull_recs = pull_obj.browse(cr, uid, pulls, context=context) transfer_locs = list(set([x.location_id for x in pull_recs])) vals = [(warehouse.lot_stock_id , x, warehouse.out_type_id.id) for x in transfer_locs] mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context) for mto_pull_val in mto_pull_vals: pull_obj.create(cr, uid, mto_pull_val, context=context) else: # We need to delete all the MTO pull rules, otherwise they risk to be used in the system pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context) if pulls: pull_obj.unlink(cr, uid, pulls, context=context) def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None): """ Will check if the resupply routes to this warehouse follow the changes of number of receipt steps """ #Check routes that are being delivered by this warehouse and change the rule coming from transit location route_obj = self.pool.get("stock.location.route") pull_obj = self.pool.get("procurement.rule") routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context) pulls= pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')]) if pulls: pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context) def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None): if reception_new: 
old_val = warehouse.reception_steps new_val = reception_new change_to_one = (old_val != 'one_step' and new_val == 'one_step') change_to_multiple = (old_val == 'one_step' and new_val != 'one_step') if change_to_one or change_to_multiple: new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id self._check_reception_resupply(cr, uid, warehouse, new_location, context=context) if delivery_new: old_val = warehouse.delivery_steps new_val = delivery_new change_to_one = (old_val != 'ship_only' and new_val == 'ship_only') change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only') if change_to_one or change_to_multiple: new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context) def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] seq_obj = self.pool.get('ir.sequence') route_obj = self.pool.get('stock.location.route') context_with_inactive = context.copy() context_with_inactive['active_test'] = False for warehouse in self.browse(cr, uid, ids, context=context_with_inactive): #first of all, check if we need to delete and recreate route if vals.get('reception_steps') or vals.get('delivery_steps'): #activate and deactivate location according to reception and delivery option self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context) # switch between route self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive) # Check if we need to change something to resupply warehouses and associated MTO rules self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context) if vals.get('code') or 
vals.get('name'): name = warehouse.name #rename sequence if vals.get('name'): name = vals.get('name', warehouse.name) self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive) if warehouse.in_type_id: seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '\IN\\'}, context=context) if warehouse.out_type_id: seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '\OUT\\'}, context=context) if warehouse.pack_type_id: seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '\PACK\\'}, context=context) if warehouse.pick_type_id: seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '\PICK\\'}, context=context) if warehouse.int_type_id: seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '\INT\\'}, context=context) if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'): for cmd in vals.get('resupply_wh_ids'): if cmd[0] == 6: new_ids = set(cmd[2]) old_ids = set([wh.id for wh in warehouse.resupply_wh_ids]) to_add_wh_ids = new_ids - old_ids if to_add_wh_ids: supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context) self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context) to_remove_wh_ids = old_ids - new_ids if to_remove_wh_ids: to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context) if to_remove_route_ids: route_obj.unlink(cr, uid, to_remove_route_ids, context=context) else: 
#not implemented
                        pass
            if 'default_resupply_wh_id' in vals:
                # The warehouse cannot resupply itself.
                if vals.get('default_resupply_wh_id') == warehouse.id:
                    raise osv.except_osv(_('Warning'),_('The default resupply warehouse should be different than the warehouse itself!'))
                if warehouse.default_resupply_wh_id:
                    #remove the existing resupplying route on the warehouse
                    to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
                    for inter_wh_route_id in to_remove_route_ids:
                        self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
                if vals.get('default_resupply_wh_id'):
                    #assign the new resupplying route on all products
                    to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
                    for inter_wh_route_id in to_assign_route_ids:
                        self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
        return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)

    def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
        """Return the ids of the warehouse's own routes, the routes it
        supplies, and the route of its MTO pull rule."""
        route_obj = self.pool.get("stock.location.route")
        all_routes = [route.id for route in warehouse.route_ids]
        all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
        all_routes += [warehouse.mto_pull_id.route_id.id]
        return all_routes

    def view_all_routes_for_wh(self, cr, uid, ids, context=None):
        """Window action listing every route relevant to the given warehouses."""
        all_routes = []
        for wh in self.browse(cr, uid, ids, context=context):
            all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)

        domain = [('id', 'in', all_routes)]
        return {
            'name': _('Warehouse\'s Routes'),
            'domain': domain,
            'res_model': 'stock.location.route',
            'type': 'ir.actions.act_window',
            'view_id': False,
            'view_mode': 'tree,form',
            'view_type': 'form',
            'limit': 20
        }

class stock_location_path(osv.osv):
    _name = "stock.location.path"
    _description = "Pushed Flows"
    _order = "name"

    def _get_rules(self, cr, uid, ids, context=None):
        # Store trigger used by 'route_sequence' below: given route ids,
        # return the ids of their push rules to recompute.
        res = []
        for route in self.browse(cr, uid, ids, context=context):
            res += [x.id for x in route.push_ids]
        return res

    _columns = {
        'name': fields.char('Operation Name', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'route_id': fields.many2one('stock.location.route', 'Route'),
        'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
        'delay': fields.integer('Delay (days)', help="Number of days to do this transition"),
        'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
        'auto': fields.selection(
            [('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
            'Automatic Move',
            required=True, select=1,
            help="This is used to define paths the product has to follow within the location tree.\n" \
                "The 'Automatic Move' value will create a stock move after the current one that will be "\
                "validated automatically. With 'Manual Operation', the stock move has to be validated "\
                "by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
            ),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
        'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
            store={
                'stock.location.route': (_get_rules, ['sequence'], 10),
                'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
        }),
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'auto': 'auto',
        'delay': 0,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
        'propagate': True,
        'active': True,
    }

    def _prepare_push_apply(self, cr, uid, rule, move, context=None):
        # Values for the chained move created by a push rule: starts where the
        # triggering move ends, scheduled rule.delay days later.
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return {
                'origin': move.origin or move.picking_id.name or "/",
                'location_id': move.location_dest_id.id,
                'location_dest_id': rule.location_dest_id.id,
                'date': newdate,
                'company_id': rule.company_id and rule.company_id.id or False,
                'date_expected': newdate,
                'picking_id': False,
                'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
                'propagate': rule.propagate,
                'push_rule_id': rule.id,
                'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
            }

    def _apply(self, cr, uid, rule, move, context=None):
        # Apply a push rule to a move: either retarget the move in place
        # ('transparent') or chain a new move after it.
        move_obj = self.pool.get('stock.move')
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if rule.auto == 'transparent':
            old_dest_location = move.location_dest_id.id
            move_obj.write(cr, uid, [move.id], {
                'date': newdate,
                'date_expected': newdate,
                'location_dest_id': rule.location_dest_id.id
            })
            #avoid looping if a push rule is not well configured
            if rule.location_dest_id.id != old_dest_location:
                #call again push_apply to see if a next step is defined
                move_obj._push_apply(cr, uid, [move], context=context)
        else:
            vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
            move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
            move_obj.write(cr, uid, [move.id], {
                'move_dest_id': move_id,
            })
            move_obj.action_confirm(cr, uid, [move_id], context=None)

# -------------------------
# Packaging related stuff
# -------------------------

from openerp.report import report_sxw

class stock_package(osv.osv):
    """ These are the packages, containing quants and/or other packages """
    _name = "stock.quant.package"
    _description = "Physical Packages"
    _parent_name = "parent_id"
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'

    def name_get(self, cr, uid, ids, context=None):
        # Display the full '/'-separated path computed by _complete_name.
        res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
        return res.items()

    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = m.name
            parent = m.parent_id
            while parent:
                res[m.id] = parent.name + ' / ' + res[m.id]
                parent = parent.parent_id
        return res

    def _get_packages(self, cr, uid, ids, context=None):
        """Returns packages from quants for store"""
        # Given quant ids, collect their package and all ancestor packages.
        res = set()
        for quant in self.browse(cr, uid, ids, context=context):
            pack = quant.package_id
            while pack:
                res.add(pack.id)
                pack = pack.parent_id
        return list(res)

    def _get_package_info(self, cr, uid, ids, name, args, context=None):
        # Derive location/owner/company of a package from the first quant found
        # in it (or any sub-package); defaults to the current user's company.
        quant_obj = self.pool.get("stock.quant")
        default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
        for pack in self.browse(cr, uid, ids, context=context):
            quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
            if quants:
                quant = quant_obj.browse(cr, uid, quants[0], context=context)
                res[pack.id]['location_id'] = quant.location_id.id
                res[pack.id]['owner_id'] = quant.owner_id.id
                res[pack.id]['company_id'] = quant.company_id.id
            else:
                res[pack.id]['location_id'] = False
                res[pack.id]['owner_id'] = False
                res[pack.id]['company_id'] = False
        return res

    def _get_packages_to_relocate(self, cr, uid, ids, context=None):
        # Store trigger: a package change also invalidates its direct parent.
        res = set()
        for pack in self.browse(cr, uid, ids, context=context):
            res.add(pack.id)
            if pack.parent_id:
                res.add(pack.parent_id.id)
        return list(res)

    _columns = {
        'name': fields.char('Package Reference', select=True, copy=False),
        'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package share the same product, otherwise it doesn't really makes sense.", select=True),
        'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
        'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
                                    store={
                                       'stock.quant': (_get_packages, ['location_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
        'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
        'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
        'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
        'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
                                    store={
                                       'stock.quant': (_get_packages, ['company_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
        'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
                                    store={
                                       'stock.quant': (_get_packages, ['owner_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
    }
    _defaults = {
        'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
    }

    def _check_location_constraint(self, cr, uid, packs, context=None):
        '''checks that all quants in a package are stored in the same location.
This function cannot be used as a constraint because it needs to be checked on pack operations (they may not call write on the package) ''' quant_obj = self.pool.get('stock.quant') for pack in packs: parent = pack while parent.parent_id: parent = parent.parent_id quant_ids = self.get_content(cr, uid, [parent.id], context=context) quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0] location_id = quants and quants[0].location_id.id or False if not [quant.location_id.id == location_id for quant in quants]: raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location')) return True def action_print(self, cr, uid, ids, context=None): context = dict(context or {}, active_ids=ids) return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context) def unpack(self, cr, uid, ids, context=None): quant_obj = self.pool.get('stock.quant') for package in self.browse(cr, uid, ids, context=context): quant_ids = [quant.id for quant in package.quant_ids] quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context) children_package_ids = [child_package.id for child_package in package.children_ids] self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context) #delete current package since it contains nothing anymore self.unlink(cr, uid, ids, context=context) return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context) def get_content(self, cr, uid, ids, context=None): child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context) return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context) def get_content_package(self, cr, uid, ids, context=None): quants_ids = self.get_content(cr, uid, ids, context=context) res = self.pool.get('ir.actions.act_window').for_xml_id(cr, 
uid, 'stock', 'quantsact', context=context) res['domain'] = [('id', 'in', quants_ids)] return res def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None): ''' find the total of given product 'product_id' inside the given package 'package_id''' quant_obj = self.pool.get('stock.quant') all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context) total = 0 for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context): if quant.product_id.id == product_id: total += quant.qty return total def _get_all_products_quantities(self, cr, uid, package_id, context=None): '''This function computes the different product quantities for the given package ''' quant_obj = self.pool.get('stock.quant') res = {} for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)): if quant.product_id.id not in res: res[quant.product_id.id] = 0 res[quant.product_id.id] += quant.qty return res def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None): stock_pack_operation_obj = self.pool.get('stock.pack.operation') if default is None: default = {} new_package_id = self.copy(cr, uid, id, default_pack_values, context=context) default['result_package_id'] = new_package_id op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context) for op_id in op_ids: stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context) class stock_pack_operation(osv.osv): _name = "stock.pack.operation" _description = "Packing Operation" def _get_remaining_prod_quantities(self, cr, uid, operation, context=None): '''Get the remaining quantities per product on an operation with a package. 
This function returns a dictionary''' #if the operation doesn't concern a package, it's not relevant to call this function if not operation.package_id or operation.product_id: return {operation.product_id.id: operation.remaining_qty} #get the total of products the package contains res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context) #reduce by the quantities linked to a move for record in operation.linked_move_operation_ids: if record.move_id.product_id.id not in res: res[record.move_id.product_id.id] = 0 res[record.move_id.product_id.id] -= record.qty return res def _get_remaining_qty(self, cr, uid, ids, name, args, context=None): uom_obj = self.pool.get('product.uom') res = {} for ops in self.browse(cr, uid, ids, context=context): res[ops.id] = 0 if ops.package_id and not ops.product_id: #dont try to compute the remaining quantity for packages because it's not relevant (a package could include different products). #should use _get_remaining_prod_quantities instead continue else: qty = ops.product_qty if ops.product_uom_id: qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context) for record in ops.linked_move_operation_ids: qty -= record.qty res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding) return res def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None): res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context) if product_id and not product_uom_id: product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) res['value']['product_uom_id'] = product.uom_id.id return res def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None): res = {'value': {}} uom_obj = self.pool.get('product.uom') if product_id: product = self.pool.get('product.product').browse(cr, uid, 
product_id, context=context) product_uom_id = product_uom_id or product.uom_id.id selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context) if selected_uom.category_id.id != product.uom_id.category_id.id: res['warning'] = { 'title': _('Warning: wrong UoM!'), 'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose an UoM within the same UoM category.') % (product.name) } if product_qty and 'warning' not in res: rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True) if rounded_qty != product_qty: res['warning'] = { 'title': _('Warning: wrong quantity!'), 'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name) } return res _columns = { 'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True), 'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1 'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'), 'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True), 'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')), 'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2 'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'), 'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'), 'date': fields.datetime('Date', required=True), 'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"), #'update_cost': fields.boolean('Need cost update'), 'cost': fields.float("Cost", help="Unit Cost for this product line"), 'currency': 
fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'), 'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'), 'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "), 'location_id': fields.many2one('stock.location', 'Source Location', required=True), 'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True), 'processed': fields.selection([('true','Yes'), ('false','No')],'Has been processed?', required=True), } _defaults = { 'date': fields.date.context_today, 'qty_done': 0, 'processed': lambda *a: 'false', } def write(self, cr, uid, ids, vals, context=None): context = context or {} res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context) if isinstance(ids, (int, long)): ids = [ids] if not context.get("no_recompute"): pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)])) self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context) return res def create(self, cr, uid, vals, context=None): context = context or {} res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context) if vals.get("picking_id") and not context.get("no_recompute"): self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context) return res_id def action_drop_down(self, cr, uid, ids, context=None): ''' Used by barcode interface to say that pack_operation has been moved from src location to destination location, if qty_done is less than product_qty than we have to split the operation in 
two to process the one with the qty moved ''' processed_ids = [] move_obj = self.pool.get("stock.move") for pack_op in self.browse(cr, uid, ids, context=None): if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id: move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context) op = pack_op.id if pack_op.qty_done < pack_op.product_qty: # we split the operation in two op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context) self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0}, context=context) processed_ids.append(op) self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context) def create_and_assign_lot(self, cr, uid, id, name, context=None): ''' Used by barcode interface to create a new lot and assign it to the operation ''' obj = self.browse(cr,uid,id,context) product_id = obj.product_id.id val = {'product_id': product_id} new_lot_id = False if name: lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context) if lots: new_lot_id = lots[0] val.update({'name': name}) if not new_lot_id: new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context) self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context) def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None): '''Search for an operation with given 'domain' in a picking, if it exists increment the qty (+1) otherwise create it :param domain: list of tuple directly reusable as a domain context can receive a key 'current_package_id' with the package to consider for this operation returns True ''' if context is None: context = {} #if current_package_id is given in the context, we increase the number of items 
in this package package_clause = [('result_package_id', '=', context.get('current_package_id', False))] existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context) todo_operation_ids = [] if existing_operation_ids: if filter_visible: todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids] else: todo_operation_ids = existing_operation_ids if todo_operation_ids: #existing operation found for the given domain and picking => increment its quantity operation_id = todo_operation_ids[0] op_obj = self.browse(cr, uid, operation_id, context=context) qty = op_obj.qty_done if increment: qty += 1 else: qty -= 1 if qty >= 1 else 0 if qty == 0 and op_obj.product_qty == 0: #we have a line with 0 qty set, so delete it self.unlink(cr, uid, [operation_id], context=context) return False self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context) else: #no existing operation found for the given domain and picking => create a new one picking_obj = self.pool.get("stock.picking") picking = picking_obj.browse(cr, uid, picking_id, context=context) values = { 'picking_id': picking_id, 'product_qty': 0, 'location_id': picking.location_id.id, 'location_dest_id': picking.location_dest_id.id, 'qty_done': 1, } for key in domain: var_name, dummy, value = key uom_id = False if var_name == 'product_id': uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id update_dict = {var_name: value} if uom_id: update_dict['product_uom_id'] = uom_id values.update(update_dict) operation_id = self.create(cr, uid, values, context=context) return operation_id class stock_move_operation_link(osv.osv): """ Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects """ _name = "stock.move.operation.link" _description = "Link between stock moves and pack operations" _columns = { 'qty': fields.float('Quantity', 
help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."), 'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"), 'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"), 'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"), } def get_specific_domain(self, cr, uid, record, context=None): '''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move, having the record given as parameter making the link between the stock move and a pack operation''' op = record.operation_id domain = [] if op.package_id and op.product_id: #if removing a product from a box, we restrict the choice of quants to this box domain.append(('package_id', '=', op.package_id.id)) elif op.package_id: #if moving a box, we allow to take everything from inside boxes as well domain.append(('package_id', 'child_of', [op.package_id.id])) else: #if not given any information about package, we don't open boxes domain.append(('package_id', '=', False)) #if lot info is given, we restrict choice to this lot otherwise we can take any if op.lot_id: domain.append(('lot_id', '=', op.lot_id.id)) #if owner info is given, we restrict to this owner otherwise we restrict to no owner if op.owner_id: domain.append(('owner_id', '=', op.owner_id.id)) else: domain.append(('owner_id', '=', False)) return domain class stock_warehouse_orderpoint(osv.osv): """ Defines Minimum stock rules. 
""" _name = "stock.warehouse.orderpoint" _description = "Minimum Inventory Rule" def subtract_procurements(self, cr, uid, orderpoint, context=None): '''This function returns quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with aim to fulfill it. ''' qty = 0 uom_obj = self.pool.get("product.uom") for procurement in orderpoint.procurement_ids: if procurement.state in ('cancel', 'done'): continue procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context) for move in procurement.move_ids: #need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet if move.state not in ('draft'): #if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted procurement_qty -= move.product_qty qty += procurement_qty return qty def _check_product_uom(self, cr, uid, ids, context=None): ''' Check if the UoM has the same category as the product standard UoM ''' if not context: context = {} for rule in self.browse(cr, uid, ids, context=context): if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id: return False return True def action_view_proc_to_process(self, cr, uid, ids, context=None): act_obj = self.pool.get('ir.actions.act_window') mod_obj = self.pool.get('ir.model.data') proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context) result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements') if not result: return False result = act_obj.read(cr, uid, [result[1]], context=context)[0] result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]" return result _columns = { 'name': fields.char('Name', required=True, copy=False), 'active': 
fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."), 'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"), 'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"), 'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]), 'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True), 'product_min_qty': fields.float('Minimum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'), help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\ "a procurement to bring the forecasted quantity to the Max Quantity."), 'product_max_qty': fields.float('Maximum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'), help="When the virtual stock goes below the Min Quantity, Odoo generates "\ "a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."), 'qty_multiple': fields.float('Qty Multiple', required=True, digits_compute=dp.get_precision('Product Unit of Measure'), help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "), 'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'), 'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. 
If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False), 'company_id': fields.many2one('res.company', 'Company', required=True), } _defaults = { 'active': lambda *a: 1, 'logic': lambda *a: 'max', 'qty_multiple': lambda *a: 1, 'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '', 'product_uom': lambda self, cr, uid, context: context.get('product_uom', False), 'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context) } _sql_constraints = [ ('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'), ] _constraints = [ (_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']), ] def default_get(self, cr, uid, fields, context=None): warehouse_obj = self.pool.get('stock.warehouse') res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context) # default 'warehouse_id' and 'location_id' if 'warehouse_id' not in res: warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or [] res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False if 'location_id' not in res: res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False return res def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None): """ Finds location id for changed warehouse. @param warehouse_id: Changed id of warehouse. @return: Dictionary of values. 
""" if warehouse_id: w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context) v = {'location_id': w.lot_stock_id.id} return {'value': v} return {} def onchange_product_id(self, cr, uid, ids, product_id, context=None): """ Finds UoM for changed product. @param product_id: Changed id of product. @return: Dictionary of values. """ if product_id: prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context) d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]} v = {'product_uom': prod.uom_id.id} return {'value': v, 'domain': d} return {'domain': {'product_uom': []}} class stock_picking_type(osv.osv): _name = "stock.picking.type" _description = "The picking type determines the picking view" _order = 'sequence' def open_barcode_interface(self, cr, uid, ids, context=None): final_url = "/barcode/web/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0' return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'} def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None): picking_obj = self.pool.get('stock.picking') res = {} for picking_type_id in ids: #get last 10 pickings of this type picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context) tristates = [] for picking in picking_obj.browse(cr, uid, picking_ids, context=context): if picking.date_done > picking.date: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Late'), 'value': -1}) elif picking.backorder_id: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Backorder exists'), 'value': 0}) else: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('OK'), 'value': 1}) res[picking_type_id] = json.dumps(tristates) return res def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None): obj = self.pool.get('stock.picking') domains = { 'count_picking_draft': 
[('state', '=', 'draft')], 'count_picking_waiting': [('state', '=', 'confirmed')], 'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))], 'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))], } result = {} for field in domains: data = obj.read_group(cr, uid, domains[field] + [('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)], ['picking_type_id'], ['picking_type_id'], context=context) count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data)) for tid in ids: result.setdefault(tid, {})[field] = count.get(tid, 0) for tid in ids: if result[tid]['count_picking']: result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking'] result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking'] else: result[tid]['rate_picking_late'] = 0 result[tid]['rate_picking_backorders'] = 0 return result def onchange_picking_code(self, cr, uid, ids, picking_code=False): if not picking_code: return False obj_data = self.pool.get('ir.model.data') stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock') result = { 'default_location_src_id': stock_loc, 'default_location_dest_id': stock_loc, } if picking_code == 'incoming': result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers') elif picking_code == 'outgoing': result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers') return {'value': result} def _get_name(self, cr, uid, ids, field_names, arg, 
context=None): return dict(self.name_get(cr, uid, ids, context=context)) def name_get(self, cr, uid, ids, context=None): """Overides orm name_get method to display 'Warehouse_name: PickingType_name' """ if context is None: context = {} if not isinstance(ids, list): ids = [ids] res = [] if not ids: return res for record in self.browse(cr, uid, ids, context=context): name = record.name if record.warehouse_id: name = record.warehouse_id.name + ': ' +name if context.get('special_shortened_wh_name'): if record.warehouse_id: name = record.warehouse_id.name else: name = _('Customer') + ' (' + record.name + ')' res.append((record.id, name)) return res def _default_warehouse(self, cr, uid, context=None): user = self.pool.get('res.users').browse(cr, uid, uid, context) res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context) return res and res[0] or False _columns = { 'name': fields.char('Picking Type Name', translate=True, required=True), 'complete_name': fields.function(_get_name, type='char', string='Name'), 'color': fields.integer('Color'), 'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"), 'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True), 'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'), 'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'), 'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True), 'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'), 'active': fields.boolean('Active'), # Statistics for the kanban view 'last_done_picking': fields.function(_get_tristate_values, type='char', string='Last 10 Done Pickings'), 'count_picking_draft': 
fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_ready': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_waiting': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'rate_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'rate_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), } _defaults = { 'warehouse_id': _default_warehouse, 'active': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
tangyiyong/odoo
addons/stock/stock.py
Python
agpl-3.0
270,177
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.spark import java.lang.Boolean import java.sql.Timestamp import java.util.Date import org.apache.ignite.spark.IgniteRDDSpec.ScalarCacheQuerySqlField class EntityTestAllTypeFields( @ScalarCacheQuerySqlField(index = true) val boolVal: Boolean, @ScalarCacheQuerySqlField(index = true) val byteVal: Byte, @ScalarCacheQuerySqlField(index = true) val shortVal: Short, @ScalarCacheQuerySqlField(index = true) val intVal: Int, @ScalarCacheQuerySqlField(index = true) val longVal: Long, @ScalarCacheQuerySqlField(index = true) val floatVal: Float, @ScalarCacheQuerySqlField(index = true) val doubleVal: Double, @ScalarCacheQuerySqlField(index = true) val strVal: String, @ScalarCacheQuerySqlField(index = true) val dateVal: Date, @ScalarCacheQuerySqlField(index = true) val timestampVal: Timestamp, @ScalarCacheQuerySqlField(index = true) val byteArrVal: Array[Byte], @ScalarCacheQuerySqlField(index = true) val bigDecVal: java.math.BigDecimal, @ScalarCacheQuerySqlField(index = true) val javaSqlDate: java.sql.Date ) extends Serializable { def this( i: Int ) { this( i % 2 == 0, // Boolean i.toByte, // Byte i.toShort, // Short i, // Int i.toLong, // Long i, // Float i, // Double "name" + i, // 
String new Date(i), new Timestamp(i), Array(i.toByte, i.toByte), new java.math.BigDecimal(i.toString), new java.sql.Date(i)) } }
irudyak/ignite
modules/spark/src/test/scala/org/apache/ignite/spark/EntityTestAllTypeFields.scala
Scala
apache-2.0
2,451
/* * descriptions + helper functions for simple dvb plls. */ #ifndef __DVB_PLL_H__ #define __DVB_PLL_H__ #include <linux/i2c.h> #include "dvb_frontend.h" #define DVB_PLL_UNDEFINED 0 #define DVB_PLL_THOMSON_DTT7579 1 #define DVB_PLL_THOMSON_DTT759X 2 #define DVB_PLL_LG_Z201 3 #define DVB_PLL_UNKNOWN_1 4 #define DVB_PLL_TUA6010XS 5 #define DVB_PLL_ENV57H1XD5 6 #define DVB_PLL_TUA6034 7 #define DVB_PLL_TDA665X 8 #define DVB_PLL_TDED4 9 #define DVB_PLL_TDHU2 10 #define DVB_PLL_SAMSUNG_TBMV 11 #define DVB_PLL_PHILIPS_SD1878_TDA8261 12 #define DVB_PLL_OPERA1 13 #define DVB_PLL_SAMSUNG_DTOS403IH102A 14 #define DVB_PLL_SAMSUNG_TDTC9251DH0 15 #define DVB_PLL_SAMSUNG_TBDU18132 16 #define DVB_PLL_SAMSUNG_TBMU24112 17 #define DVB_PLL_TDEE4 18 #define DVB_PLL_THOMSON_DTT7520X 19 /** * Attach a dvb-pll to the supplied frontend structure. * * @param fe Frontend to attach to. * @param pll_addr i2c address of the PLL (if used). * @param i2c i2c adapter to use (set to NULL if not used). * @param pll_desc_id dvb_pll_desc to use. * @return Frontend pointer on success, NULL on failure */ #if IS_REACHABLE(CONFIG_DVB_PLL) extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, unsigned int pll_desc_id); #else static inline struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, unsigned int pll_desc_id) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; } #endif #endif
AiJiaZone/linux-4.0
virt/drivers/media/dvb-frontends/dvb-pll.h
C
gpl-2.0
1,768
@echo off rem rem Licensed to the Apache Software Foundation (ASF) under one or more rem contributor license agreements. See the NOTICE file distributed with rem this work for additional information regarding copyright ownership. rem The ASF licenses this file to You under the Apache License, Version 2.0 rem (the "License"); you may not use this file except in compliance with rem the License. You may obtain a copy of the License at rem rem http://www.apache.org/licenses/LICENSE-2.0 rem rem Unless required by applicable law or agreed to in writing, software rem distributed under the License is distributed on an "AS IS" BASIS, rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. rem See the License for the specific language governing permissions and rem limitations under the License. rem cmd /V /E /C "%~dp0spark-class.cmd" org.apache.hive.beeline.BeeLine %*
alec-heif/MIT-Thesis
spark-bin/bin/beeline.cmd
Batchfile
mit
899
package cliconfig import ( "fmt" "io" "os" "path/filepath" "github.com/docker/docker/api/types" "github.com/docker/docker/cliconfig/configfile" "github.com/docker/docker/pkg/homedir" ) const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" configFileDir = ".docker" oldConfigfile = ".dockercfg" ) var ( configDir = os.Getenv("DOCKER_CONFIG") ) func init() { if configDir == "" { configDir = filepath.Join(homedir.Get(), configFileDir) } } // ConfigDir returns the directory the configuration file is stored in func ConfigDir() string { return configDir } // SetConfigDir sets the directory the configuration file is stored in func SetConfigDir(dir string) { configDir = dir } // NewConfigFile initializes an empty configuration file for the given filename 'fn' func NewConfigFile(fn string) *configfile.ConfigFile { return &configfile.ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), HTTPHeaders: make(map[string]string), Filename: fn, } } // LegacyLoadFromReader is a convenience function that creates a ConfigFile object from // a non-nested reader func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { configFile := configfile.ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LegacyLoadFromReader(configData) return &configFile, err } // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { configFile := configfile.ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LoadFromReader(configData) return &configFile, err } // Load reads the configuration files in the given directory, and sets up // the auth config information and returns values. 
// FIXME: use the internal golang config parser func Load(configDir string) (*configfile.ConfigFile, error) { if configDir == "" { configDir = ConfigDir() } configFile := configfile.ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), Filename: filepath.Join(configDir, ConfigFileName), } // Try happy path first - latest config file if _, err := os.Stat(configFile.Filename); err == nil { file, err := os.Open(configFile.Filename) if err != nil { return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) } defer file.Close() err = configFile.LoadFromReader(file) if err != nil { err = fmt.Errorf("%s - %v", configFile.Filename, err) } return &configFile, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) } // Can't find latest config file so check for the old one confFile := filepath.Join(homedir.Get(), oldConfigfile) if _, err := os.Stat(confFile); err != nil { return &configFile, nil //missing file is not an error } file, err := os.Open(confFile) if err != nil { return &configFile, fmt.Errorf("%s - %v", confFile, err) } defer file.Close() err = configFile.LegacyLoadFromReader(file) if err != nil { return &configFile, fmt.Errorf("%s - %v", confFile, err) } if configFile.HTTPHeaders == nil { configFile.HTTPHeaders = map[string]string{} } return &configFile, nil }
amshinde/shim
vendor/github.com/moby/moby/cliconfig/config.go
GO
apache-2.0
3,360
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. var embedder = {}; embedder.tests = {}; embedder.baseGuestURL = ''; embedder.guestURL = ''; // Sends a message to WebViewTest denoting it is done and test // has failed. embedder.failTest = function(msg) { window.console.log('test failure, reason: ' + msg); chrome.test.sendMessage('TEST_FAILED'); }; // Sends a message to WebViewTest denoting it is done and test // has succeeded. embedder.maybePassTest = function() { window.console.log('test passed'); chrome.test.sendMessage('TEST_PASSED'); }; /** @private */ embedder.setUpGuest_ = function() { document.querySelector('#webview-tag-container').innerHTML = '<webview style="width: 100px; height: 100px;"' + ' src="' + embedder.guestURL + '"' + '></webview>'; var webview = document.querySelector('webview'); if (!webview) { embedder.failTest('No <webview> element created'); return null; } return webview; }; /** @private */ embedder.setUpLoadStop_ = function(webview) { var onWebViewLoadStop = function(e) { window.console.log('onWebViewLoadStop'); // Send post message to <webview> when it's ready to receive them. // This will make the guest start issueing media request. We do not // worry about the Javascript outcome. MockWebContestsDelegate in // WebViewTest will take care of that. webview.contentWindow.postMessage( JSON.stringify(['get-sources-permission']), '*'); }; webview.addEventListener('loadstop', onWebViewLoadStop); }; // The test loads a guest which requests media sources, which will in turn check // for media access permission. // // Note that this is a manually run test, not using chrome.test.runTests. // This is because we want to wait for MockWebContestsDelegate to catch the // media access check and not actually do a check. // Entry point for test, called by WebViewTest. 
function startCheckTest(testName) { chrome.test.getConfig(function(config) { embedder.baseGuestURL = 'http://localhost:' + config.testServer.port; embedder.guestURL = embedder.baseGuestURL + '/extensions/platform_apps/web_view/media_access' + '/media_check_guest.html'; chrome.test.log('Guest url is: ' + embedder.guestURL); var webview = embedder.setUpGuest_(); if (!webview) { return; } embedder.setUpLoadStop_(webview); webview.addEventListener('consolemessage', function(e) { window.console.log(e.message); }); window.addEventListener('message', function(e) { var data = JSON.parse(e.data); if (data[0] == 'got-sources') { embedder.maybePassTest(); } else { window.console.log('Unexpected message: ' + e.message); } }); }); } onload = function() { chrome.test.sendMessage('Launched'); };
ric2b/Vivaldi-browser
chromium/chrome/test/data/extensions/platform_apps/web_view/media_access/check/embedder.js
JavaScript
bsd-3-clause
2,927
/* * Copyright (C) 2012 Sascha Hauer, Pengutronix <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/mm.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <linux/of.h> #include <linux/err.h> #include <mach/hardware.h> #include <mach/common.h> #include "crmregs-imx3.h" #include "clk.h" struct arm_ahb_div { unsigned char arm, ahb, sel; }; static struct arm_ahb_div clk_consumer[] = { { .arm = 1, .ahb = 4, .sel = 0}, { .arm = 1, .ahb = 3, .sel = 1}, { .arm = 2, .ahb = 2, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 4, .ahb = 1, .sel = 0}, { .arm = 1, .ahb = 5, .sel = 0}, { .arm = 1, .ahb = 8, .sel = 0}, { .arm = 1, .ahb = 6, .sel = 1}, { .arm = 2, .ahb = 4, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 4, .ahb = 2, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, }; static char hsp_div_532[] = { 4, 8, 3, 0 }; static char hsp_div_400[] = { 3, 6, 3, 0 }; static const char *std_sel[] = {"ppll", "arm"}; static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"}; enum mx35_clks { ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg, arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel, esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre, spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre, ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate, audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate, edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate, esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate, gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate, kpp_gate, mlb_gate, 
mshc_gate, owire_gate, pwm_gate, rngc_gate, rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate, ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate, wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate, clk_max }; static struct clk *clk[clk_max]; int __init mx35_clocks_init() { void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR); u32 pdr0, consumer_sel, hsp_sel; struct arm_ahb_div *aad; unsigned char *hsp_div; int i; pdr0 = __raw_readl(base + MXC_CCM_PDR0); consumer_sel = (pdr0 >> 16) & 0xf; aad = &clk_consumer[consumer_sel]; if (!aad->arm) { pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel); /* * We are basically stuck. Continue with a default entry and hope we * get far enough to actually show the above message */ aad = &clk_consumer[0]; } clk[ckih] = imx_clk_fixed("ckih", 24000000); clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL); clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL); clk[mpll] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4); if (aad->sel) clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm); else clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm); if (clk_get_rate(clk[arm]) > 400000000) hsp_div = hsp_div_532; else hsp_div = hsp_div_400; hsp_sel = (pdr0 >> 20) & 0x3; if (!hsp_div[hsp_sel]) { pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel); hsp_sel = 0; } clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]); clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb); clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2); clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6); clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3); clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel)); clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel)); 
clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6); clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel)); clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6); clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6); clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6); clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel)); clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */ clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6); clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel)); clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3); clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6); clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3); clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6); clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel)); clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6); clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4); clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0, 0); clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0, 2); clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0, 4); clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0, 6); clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0, 8); clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10); 
clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12); clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14); clk[edio_gate] = imx_clk_gate2("edio_gate", "ipg", base + MX35_CCM_CGR0, 16); clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18); clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20); clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22); clk[esai_gate] = imx_clk_gate2("esai_gate", "ipg", base + MX35_CCM_CGR0, 24); clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26); clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28); clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30); clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1, 0); clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1, 2); clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1, 4); clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1, 6); clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1, 8); clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10); clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12); clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14); clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16); clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18); clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20); clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22); clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24); clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26); clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 
28); clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30); clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2, 0); clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2, 2); clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2, 4); clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2, 6); clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2, 8); clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10); clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12); clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14); clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16); clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18); clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20); clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22); clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24); clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26); clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30); clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3, 0); clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3, 2); clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3, 4); for (i = 0; i < ARRAY_SIZE(clk); i++) if (IS_ERR(clk[i])) pr_err("i.MX35 clk %d: register failed with %ld\n", i, PTR_ERR(clk[i])); clk_register_clkdev(clk[pata_gate], NULL, "pata_imx"); clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0"); clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1"); clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0"); clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0"); clk_register_clkdev(clk[cspi2_gate], "per", 
"imx35-cspi.1"); clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1"); clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0"); clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1"); clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0"); clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0"); clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0"); clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1"); clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1"); clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1"); clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2"); clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2"); clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2"); /* i.mx35 has the i.mx27 type fec */ clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0"); clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0"); clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0"); clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1"); clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2"); clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core"); clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1"); clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0"); clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0"); clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1"); clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1"); /* i.mx35 has the i.mx21 type uart */ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0"); clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1"); clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1"); clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2"); clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2"); clk_register_clkdev(clk[usb_div], 
"per", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0"); clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1"); clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1"); clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2"); clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc"); clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0"); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); clk_prepare_enable(clk[gpio2_gate]); clk_prepare_enable(clk[gpio3_gate]); clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. The clock code * before conversion to common clk also enabled UART1 (which isn't * handled here and not needed for mmc) and IIM (which is enabled * unconditionally above). */ clk_prepare_enable(clk[scc_gate]); imx_print_silicon_rev("i.MX35", mx35_revision()); #ifdef CONFIG_MXC_USE_EPIT epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); #else mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); #endif return 0; }
greguu/linux-3.5-borzoi
arch/arm/mach-imx/clk-imx35.c
C
gpl-2.0
13,763
/* * UEFI Common Platform Error Record (CPER) support * * Copyright (C) 2010, Intel Corp. * Author: Huang Ying <[email protected]> * * CPER is the format used to describe platform hardware error by * various tables, such as ERST, BERT and HEST etc. * * For more information about CPER, please refer to Appendix N of UEFI * Specification version 2.4. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/time.h> #include <linux/cper.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/pci.h> #include <linux/aer.h> #include <linux/printk.h> #include <linux/bcd.h> #include <acpi/ghes.h> #include <ras/ras_event.h> #define INDENT_SP " " static char rcd_decode_str[CPER_REC_LEN]; /* * CPER record ID need to be unique even after reboot, because record * ID is used as index for ERST storage, while CPER records from * multiple boot may co-exist in ERST. */ u64 cper_next_record_id(void) { static atomic64_t seq; if (!atomic64_read(&seq)) atomic64_set(&seq, ((u64)get_seconds()) << 32); return atomic64_inc_return(&seq); } EXPORT_SYMBOL_GPL(cper_next_record_id); static const char * const severity_strs[] = { "recoverable", "fatal", "corrected", "info", }; const char *cper_severity_str(unsigned int severity) { return severity < ARRAY_SIZE(severity_strs) ? 
severity_strs[severity] : "unknown"; } EXPORT_SYMBOL_GPL(cper_severity_str); /* * cper_print_bits - print strings for set bits * @pfx: prefix for each line, including log level and prefix string * @bits: bit mask * @strs: string array, indexed by bit position * @strs_size: size of the string array: @strs * * For each set bit in @bits, print the corresponding string in @strs. * If the output length is longer than 80, multiple line will be * printed, with @pfx is printed at the beginning of each line. */ void cper_print_bits(const char *pfx, unsigned int bits, const char * const strs[], unsigned int strs_size) { int i, len = 0; const char *str; char buf[84]; for (i = 0; i < strs_size; i++) { if (!(bits & (1U << i))) continue; str = strs[i]; if (!str) continue; if (len && len + strlen(str) + 2 > 80) { printk("%s\n", buf); len = 0; } if (!len) len = snprintf(buf, sizeof(buf), "%s%s", pfx, str); else len += snprintf(buf+len, sizeof(buf)-len, ", %s", str); } if (len) printk("%s\n", buf); } static const char * const proc_type_strs[] = { "IA32/X64", "IA64", "ARM", }; static const char * const proc_isa_strs[] = { "IA32", "IA64", "X64", "ARM A32/T32", "ARM A64", }; static const char * const proc_error_type_strs[] = { "cache error", "TLB error", "bus error", "micro-architectural error", }; static const char * const proc_op_strs[] = { "unknown or generic", "data read", "data write", "instruction execution", }; static const char * const proc_flag_strs[] = { "restartable", "precise IP", "overflow", "corrected", }; static void cper_print_proc_generic(const char *pfx, const struct cper_sec_proc_generic *proc) { if (proc->validation_bits & CPER_PROC_VALID_TYPE) printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type, proc->proc_type < ARRAY_SIZE(proc_type_strs) ? proc_type_strs[proc->proc_type] : "unknown"); if (proc->validation_bits & CPER_PROC_VALID_ISA) printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa, proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ? 
proc_isa_strs[proc->proc_isa] : "unknown"); if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) { printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type); cper_print_bits(pfx, proc->proc_error_type, proc_error_type_strs, ARRAY_SIZE(proc_error_type_strs)); } if (proc->validation_bits & CPER_PROC_VALID_OPERATION) printk("%s""operation: %d, %s\n", pfx, proc->operation, proc->operation < ARRAY_SIZE(proc_op_strs) ? proc_op_strs[proc->operation] : "unknown"); if (proc->validation_bits & CPER_PROC_VALID_FLAGS) { printk("%s""flags: 0x%02x\n", pfx, proc->flags); cper_print_bits(pfx, proc->flags, proc_flag_strs, ARRAY_SIZE(proc_flag_strs)); } if (proc->validation_bits & CPER_PROC_VALID_LEVEL) printk("%s""level: %d\n", pfx, proc->level); if (proc->validation_bits & CPER_PROC_VALID_VERSION) printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version); if (proc->validation_bits & CPER_PROC_VALID_ID) printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id); if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS) printk("%s""target_address: 0x%016llx\n", pfx, proc->target_addr); if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) printk("%s""requestor_id: 0x%016llx\n", pfx, proc->requestor_id); if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID) printk("%s""responder_id: 0x%016llx\n", pfx, proc->responder_id); if (proc->validation_bits & CPER_PROC_VALID_IP) printk("%s""IP: 0x%016llx\n", pfx, proc->ip); } #if defined(CONFIG_ARM64) || defined(CONFIG_ARM) static const char * const arm_reg_ctx_strs[] = { "AArch32 general purpose registers", "AArch32 EL1 context registers", "AArch32 EL2 context registers", "AArch32 secure context registers", "AArch64 general purpose registers", "AArch64 EL1 context registers", "AArch64 EL2 context registers", "AArch64 EL3 context registers", "Misc. 
system register structure", }; static void cper_print_proc_arm(const char *pfx, const struct cper_sec_proc_arm *proc) { int i, len, max_ctx_type; struct cper_arm_err_info *err_info; struct cper_arm_ctx_info *ctx_info; char newpfx[64]; printk("%sMIDR: 0x%016llx\n", pfx, proc->midr); len = proc->section_length - (sizeof(*proc) + proc->err_info_num * (sizeof(*err_info))); if (len < 0) { printk("%ssection length: %d\n", pfx, proc->section_length); printk("%ssection length is too small\n", pfx); printk("%sfirmware-generated error record is incorrect\n", pfx); printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num); return; } if (proc->validation_bits & CPER_ARM_VALID_MPIDR) printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n", pfx, proc->mpidr); if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL) printk("%serror affinity level: %d\n", pfx, proc->affinity_level); if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) { printk("%srunning state: 0x%x\n", pfx, proc->running_state); printk("%sPower State Coordination Interface state: %d\n", pfx, proc->psci_state); } snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); err_info = (struct cper_arm_err_info *)(proc + 1); for (i = 0; i < proc->err_info_num; i++) { printk("%sError info structure %d:\n", pfx, i); printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1); if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) { if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST) printk("%sfirst error captured\n", newpfx); if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST) printk("%slast error captured\n", newpfx); if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED) printk("%spropagated error captured\n", newpfx); if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW) printk("%soverflow occurred, error info is incomplete\n", newpfx); } printk("%serror_type: %d, %s\n", newpfx, err_info->type, err_info->type < ARRAY_SIZE(proc_error_type_strs) ? 
proc_error_type_strs[err_info->type] : "unknown"); if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) printk("%serror_info: 0x%016llx\n", newpfx, err_info->error_info); if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR) printk("%svirtual fault address: 0x%016llx\n", newpfx, err_info->virt_fault_addr); if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR) printk("%sphysical fault address: 0x%016llx\n", newpfx, err_info->physical_fault_addr); err_info += 1; } ctx_info = (struct cper_arm_ctx_info *)err_info; max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1; for (i = 0; i < proc->context_info_num; i++) { int size = sizeof(*ctx_info) + ctx_info->size; printk("%sContext info structure %d:\n", pfx, i); if (len < size) { printk("%ssection length is too small\n", newpfx); printk("%sfirmware-generated error record is incorrect\n", pfx); return; } if (ctx_info->type > max_ctx_type) { printk("%sInvalid context type: %d (max: %d)\n", newpfx, ctx_info->type, max_ctx_type); return; } printk("%sregister context type: %s\n", newpfx, arm_reg_ctx_strs[ctx_info->type]); print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, (ctx_info + 1), ctx_info->size, 0); len -= size; ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size); } if (len > 0) { printk("%sVendor specific error info has %u bytes:\n", pfx, len); print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info, len, true); } } #endif static const char * const mem_err_type_strs[] = { "unknown", "no error", "single-bit ECC", "multi-bit ECC", "single-symbol chipkill ECC", "multi-symbol chipkill ECC", "master abort", "target abort", "parity error", "watchdog timeout", "invalid address", "mirror Broken", "memory sparing", "scrub corrected error", "scrub uncorrected error", "physical memory map-out event", }; const char *cper_mem_err_type_str(unsigned int etype) { return etype < ARRAY_SIZE(mem_err_type_strs) ? 
mem_err_type_strs[etype] : "unknown"; } EXPORT_SYMBOL_GPL(cper_mem_err_type_str); static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) { u32 len, n; if (!msg) return 0; n = 0; len = CPER_REC_LEN - 1; if (mem->validation_bits & CPER_MEM_VALID_NODE) n += scnprintf(msg + n, len - n, "node: %d ", mem->node); if (mem->validation_bits & CPER_MEM_VALID_CARD) n += scnprintf(msg + n, len - n, "card: %d ", mem->card); if (mem->validation_bits & CPER_MEM_VALID_MODULE) n += scnprintf(msg + n, len - n, "module: %d ", mem->module); if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER) n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank); if (mem->validation_bits & CPER_MEM_VALID_BANK) n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank); if (mem->validation_bits & CPER_MEM_VALID_DEVICE) n += scnprintf(msg + n, len - n, "device: %d ", mem->device); if (mem->validation_bits & CPER_MEM_VALID_ROW) n += scnprintf(msg + n, len - n, "row: %d ", mem->row); if (mem->validation_bits & CPER_MEM_VALID_COLUMN) n += scnprintf(msg + n, len - n, "column: %d ", mem->column); if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION) n += scnprintf(msg + n, len - n, "bit_position: %d ", mem->bit_pos); if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID) n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ", mem->requestor_id); if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID) n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ", mem->responder_id); if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) scnprintf(msg + n, len - n, "target_id: 0x%016llx ", mem->target_id); msg[n] = '\0'; return n; } static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) { u32 len, n; const char *bank = NULL, *device = NULL; if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) return 0; n = 0; len = CPER_REC_LEN - 1; dmi_memdev_name(mem->mem_dev_handle, &bank, &device); if (bank && device) n = snprintf(msg, len, "DIMM location: 
%s %s ", bank, device); else n = snprintf(msg, len, "DIMM location: not present. DMI handle: 0x%.4x ", mem->mem_dev_handle); msg[n] = '\0'; return n; } void cper_mem_err_pack(const struct cper_sec_mem_err *mem, struct cper_mem_err_compact *cmem) { cmem->validation_bits = mem->validation_bits; cmem->node = mem->node; cmem->card = mem->card; cmem->module = mem->module; cmem->bank = mem->bank; cmem->device = mem->device; cmem->row = mem->row; cmem->column = mem->column; cmem->bit_pos = mem->bit_pos; cmem->requestor_id = mem->requestor_id; cmem->responder_id = mem->responder_id; cmem->target_id = mem->target_id; cmem->rank = mem->rank; cmem->mem_array_handle = mem->mem_array_handle; cmem->mem_dev_handle = mem->mem_dev_handle; } const char *cper_mem_err_unpack(struct trace_seq *p, struct cper_mem_err_compact *cmem) { const char *ret = trace_seq_buffer_ptr(p); if (cper_mem_err_location(cmem, rcd_decode_str)) trace_seq_printf(p, "%s", rcd_decode_str); if (cper_dimm_err_location(cmem, rcd_decode_str)) trace_seq_printf(p, "%s", rcd_decode_str); trace_seq_putc(p, '\0'); return ret; } static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, int len) { struct cper_mem_err_compact cmem; /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ if (len == sizeof(struct cper_sec_mem_err_old) && (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { pr_err(FW_WARN "valid bits set for fields beyond structure\n"); return; } if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); if (mem->validation_bits & CPER_MEM_VALID_PA) printk("%s""physical_address: 0x%016llx\n", pfx, mem->physical_addr); if (mem->validation_bits & CPER_MEM_VALID_PA_MASK) printk("%s""physical_address_mask: 0x%016llx\n", pfx, mem->physical_addr_mask); cper_mem_err_pack(mem, &cmem); if (cper_mem_err_location(&cmem, rcd_decode_str)) printk("%s%s\n", pfx, rcd_decode_str); if (mem->validation_bits & 
CPER_MEM_VALID_ERROR_TYPE) { u8 etype = mem->error_type; printk("%s""error_type: %d, %s\n", pfx, etype, cper_mem_err_type_str(etype)); } if (cper_dimm_err_location(&cmem, rcd_decode_str)) printk("%s%s\n", pfx, rcd_decode_str); } static const char * const pcie_port_type_strs[] = { "PCIe end point", "legacy PCI end point", "unknown", "unknown", "root port", "upstream switch port", "downstream switch port", "PCIe to PCI/PCI-X bridge", "PCI/PCI-X to PCIe bridge", "root complex integrated endpoint device", "root complex event collector", }; static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, const struct acpi_hest_generic_data *gdata) { if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ? pcie_port_type_strs[pcie->port_type] : "unknown"); if (pcie->validation_bits & CPER_PCIE_VALID_VERSION) printk("%s""version: %d.%d\n", pfx, pcie->version.major, pcie->version.minor); if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS) printk("%s""command: 0x%04x, status: 0x%04x\n", pfx, pcie->command, pcie->status); if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) { const __u8 *p; printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx, pcie->device_id.segment, pcie->device_id.bus, pcie->device_id.device, pcie->device_id.function); printk("%s""slot: %d\n", pfx, pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT); printk("%s""secondary_bus: 0x%02x\n", pfx, pcie->device_id.secondary_bus); printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, pcie->device_id.vendor_id, pcie->device_id.device_id); p = pcie->device_id.class_code; printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); } if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, pcie->serial_number.lower, pcie->serial_number.upper); if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS) printk( "%s""bridge: 
secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); } static void cper_print_tstamp(const char *pfx, struct acpi_hest_generic_data_v300 *gdata) { __u8 hour, min, sec, day, mon, year, century, *timestamp; if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) { timestamp = (__u8 *)&(gdata->time_stamp); sec = bcd2bin(timestamp[0]); min = bcd2bin(timestamp[1]); hour = bcd2bin(timestamp[2]); day = bcd2bin(timestamp[4]); mon = bcd2bin(timestamp[5]); year = bcd2bin(timestamp[6]); century = bcd2bin(timestamp[7]); printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx, (timestamp[3] & 0x1 ? "precise " : "imprecise "), century, year, mon, day, hour, min, sec); } } static void cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata, int sec_no) { guid_t *sec_type = (guid_t *)gdata->section_type; __u16 severity; char newpfx[64]; if (acpi_hest_get_version(gdata) >= 3) cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata); severity = gdata->error_severity; printk("%s""Error %d, type: %s\n", pfx, sec_no, cper_severity_str(severity)); if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id); if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata); printk("%s""section_type: general processor error\n", newpfx); if (gdata->error_data_length >= sizeof(*proc_err)) cper_print_proc_generic(newpfx, proc_err); else goto err_section_too_small; } else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); printk("%s""section_type: memory error\n", newpfx); if (gdata->error_data_length >= sizeof(struct cper_sec_mem_err_old)) cper_print_mem(newpfx, mem_err, 
gdata->error_data_length); else goto err_section_too_small; } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata); printk("%s""section_type: PCIe error\n", newpfx); if (gdata->error_data_length >= sizeof(*pcie)) cper_print_pcie(newpfx, pcie, gdata); else goto err_section_too_small; #if defined(CONFIG_ARM64) || defined(CONFIG_ARM) } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) { struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata); printk("%ssection_type: ARM processor error\n", newpfx); if (gdata->error_data_length >= sizeof(*arm_err)) cper_print_proc_arm(newpfx, arm_err); else goto err_section_too_small; #endif } else { const void *err = acpi_hest_get_payload(gdata); printk("%ssection type: unknown, %pUl\n", newpfx, sec_type); printk("%ssection length: %#x\n", newpfx, gdata->error_data_length); print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err, gdata->error_data_length, true); } return; err_section_too_small: pr_err(FW_WARN "error section length is too small\n"); } void cper_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { struct acpi_hest_generic_data *gdata; int sec_no = 0; char newpfx[64]; __u16 severity; severity = estatus->error_severity; if (severity == CPER_SEV_CORRECTED) printk("%s%s\n", pfx, "It has been corrected by h/w " "and requires no further action"); printk("%s""event severity: %s\n", pfx, cper_severity_str(severity)); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); apei_estatus_for_each_section(estatus, gdata) { cper_estatus_print_section(newpfx, gdata, sec_no); sec_no++; } } EXPORT_SYMBOL_GPL(cper_estatus_print); int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus) { if (estatus->data_length && estatus->data_length < sizeof(struct acpi_hest_generic_data)) return -EINVAL; if (estatus->raw_data_length && estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length) return -EINVAL; return 0; } 
EXPORT_SYMBOL_GPL(cper_estatus_check_header); int cper_estatus_check(const struct acpi_hest_generic_status *estatus) { struct acpi_hest_generic_data *gdata; unsigned int data_len, gedata_len; int rc; rc = cper_estatus_check_header(estatus); if (rc) return rc; data_len = estatus->data_length; apei_estatus_for_each_section(estatus, gdata) { gedata_len = acpi_hest_get_error_length(gdata); if (gedata_len > data_len - acpi_hest_get_size(gdata)) return -EINVAL; data_len -= acpi_hest_get_record_size(gdata); } if (data_len) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(cper_estatus_check);
BPI-SINOVOIP/BPI-Mainline-kernel
linux-4.14/drivers/firmware/efi/cper.c
C
gpl-2.0
21,379
// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <[email protected]> */ #include <linux/debugfs.h> #include "mt76x02.h" static int mt76x02_ampdu_stat_read(struct seq_file *file, void *data) { struct mt76x02_dev *dev = file->private; int i, j; for (i = 0; i < 4; i++) { seq_puts(file, "Length: "); for (j = 0; j < 8; j++) seq_printf(file, "%8d | ", i * 8 + j + 1); seq_puts(file, "\n"); seq_puts(file, "Count: "); for (j = 0; j < 8; j++) seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i * 8 + j]); seq_puts(file, "\n"); seq_puts(file, "--------"); for (j = 0; j < 8; j++) seq_puts(file, "-----------"); seq_puts(file, "\n"); } return 0; } static int mt76x02_ampdu_stat_open(struct inode *inode, struct file *f) { return single_open(f, mt76x02_ampdu_stat_read, inode->i_private); } static int read_txpower(struct seq_file *file, void *data) { struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "Target power: %d\n", dev->target_power); mt76_seq_puts_array(file, "Delta", dev->target_power_delta, ARRAY_SIZE(dev->target_power_delta)); return 0; } static const struct file_operations fops_ampdu_stat = { .open = mt76x02_ampdu_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int mt76x02_dfs_stat_read(struct seq_file *file, void *data) { struct mt76x02_dev *dev = file->private; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; int i; seq_printf(file, "allocated sequences:\t%d\n", dfs_pd->seq_stats.seq_pool_len); seq_printf(file, "used sequences:\t\t%d\n", dfs_pd->seq_stats.seq_len); seq_puts(file, "\n"); for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { seq_printf(file, "engine: %d\n", i); seq_printf(file, " hw pattern detected:\t%d\n", dfs_pd->stats[i].hw_pattern); seq_printf(file, " hw pulse discarded:\t%d\n", dfs_pd->stats[i].hw_pulse_discarded); seq_printf(file, " sw pattern detected:\t%d\n", dfs_pd->stats[i].sw_pattern); } return 0; } static int mt76x02_dfs_stat_open(struct inode *inode, struct 
file *f) { return single_open(f, mt76x02_dfs_stat_read, inode->i_private); } static const struct file_operations fops_dfs_stat = { .open = mt76x02_dfs_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int read_agc(struct seq_file *file, void *data) { struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all); seq_printf(file, "low_gain: %d\n", dev->cal.low_gain); seq_printf(file, "false_cca: %d\n", dev->cal.false_cca); seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust); return 0; } static int mt76_edcca_set(void *data, u64 val) { struct mt76x02_dev *dev = data; enum nl80211_dfs_regions region = dev->mt76.region; mutex_lock(&dev->mt76.mutex); dev->ed_monitor_enabled = !!val; dev->ed_monitor = dev->ed_monitor_enabled && region == NL80211_DFS_ETSI; mt76x02_edcca_init(dev); mutex_unlock(&dev->mt76.mutex); return 0; } static int mt76_edcca_get(void *data, u64 *val) { struct mt76x02_dev *dev = data; *val = dev->ed_monitor_enabled; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set, "%lld\n"); void mt76x02_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; dir = mt76_register_debugfs(&dev->mt76); if (!dir) return; debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt76_queues_read); debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca); debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, read_txpower); debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset); } EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
CSE3320/kernel-code
linux-5.8/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
C
gpl-2.0
4,110
cask 'uberpov' do version '3.7-Beta3' sha256 '5e3e8ba5b257ad4e058c2f7735776e271f32c02e9cc02f71b1ece6b8c950c8d0' url "http://megapov.inetart.net/uberpov_mac/downloads/Uberpov_Mac_r#{version.to_i}.zip" name 'UberPOV' homepage 'http://megapov.inetart.net/uberpov_mac/' license :affero app 'Uberpov_Mac/UberPOV.app' caveats do <<-EOS.undent The standard UberPOV include path is: #{staged_path}/Uberpov_Mac/include/ Before starting any renders, you may want to set the include path in UberPOV's preferences under "Files & Paths" > "Set search Paths for additional include files". Sample scenes will be installed at: #{staged_path}/Uberpov_Mac/scenes/ EOS end end
elnappo/homebrew-cask
Casks/uberpov.rb
Ruby
bsd-2-clause
740
/* * Copyright 2011 Google Inc. * * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #include "SkBitmap.h" #include "SkRegion.h" bool SkBitmap::scrollRect(const SkIRect* subset, int dx, int dy, SkRegion* inval) const { if (this->isImmutable()) { return false; } if (NULL != subset) { SkBitmap tmp; return this->extractSubset(&tmp, *subset) && // now call again with no rectangle tmp.scrollRect(NULL, dx, dy, inval); } int shift; switch (this->config()) { case kIndex8_Config: case kA8_Config: shift = 0; break; case kARGB_4444_Config: case kRGB_565_Config: shift = 1; break; case kARGB_8888_Config: shift = 2; break; default: // can't scroll this config return false; } int width = this->width(); int height = this->height(); // check if there's nothing to do if ((dx | dy) == 0 || width <= 0 || height <= 0) { if (NULL != inval) { inval->setEmpty(); } return true; } // compute the inval region now, before we see if there are any pixels if (NULL != inval) { SkIRect r; r.set(0, 0, width, height); // initial the region with the entire bounds inval->setRect(r); // do the "scroll" r.offset(dx, dy); // check if we scrolled completely away if (!SkIRect::Intersects(r, inval->getBounds())) { // inval has already been updated... 
return true; } // compute the dirty area inval->op(r, SkRegion::kDifference_Op); } SkAutoLockPixels alp(*this); // if we have no pixels, just return (inval is already updated) // don't call readyToDraw(), since we don't require a colortable per se if (this->getPixels() == NULL) { return true; } char* dst = (char*)this->getPixels(); const char* src = dst; int rowBytes = (int)this->rowBytes(); // need rowBytes to be signed if (dy <= 0) { src -= dy * rowBytes; height += dy; } else { dst += dy * rowBytes; height -= dy; // now jump src/dst to the last scanline src += (height - 1) * rowBytes; dst += (height - 1) * rowBytes; // now invert rowbytes so we copy backwards in the loop rowBytes = -rowBytes; } if (dx <= 0) { src -= dx << shift; width += dx; } else { dst += dx << shift; width -= dx; } // If the X-translation would push it completely beyond the region, // then there's nothing to draw. if (width <= 0) { return true; } width <<= shift; // now width is the number of bytes to move per line while (--height >= 0) { memmove(dst, src, width); dst += rowBytes; src += rowBytes; } this->notifyPixelsChanged(); return true; }
JoKaWare/GViews
third_party/skia/src/core/SkBitmap_scroll.cpp
C++
bsd-3-clause
3,076
// SPDX-License-Identifier: GPL-2.0-or-later /* drbd.h This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. Copyright (C) 2003-2008, Philipp Reisner <[email protected]>. Copyright (C) 2003-2008, Lars Ellenberg <[email protected]>. */ #include <linux/drbd.h> #include "drbd_strings.h" static const char * const drbd_conn_s_names[] = { [C_STANDALONE] = "StandAlone", [C_DISCONNECTING] = "Disconnecting", [C_UNCONNECTED] = "Unconnected", [C_TIMEOUT] = "Timeout", [C_BROKEN_PIPE] = "BrokenPipe", [C_NETWORK_FAILURE] = "NetworkFailure", [C_PROTOCOL_ERROR] = "ProtocolError", [C_WF_CONNECTION] = "WFConnection", [C_WF_REPORT_PARAMS] = "WFReportParams", [C_TEAR_DOWN] = "TearDown", [C_CONNECTED] = "Connected", [C_STARTING_SYNC_S] = "StartingSyncS", [C_STARTING_SYNC_T] = "StartingSyncT", [C_WF_BITMAP_S] = "WFBitMapS", [C_WF_BITMAP_T] = "WFBitMapT", [C_WF_SYNC_UUID] = "WFSyncUUID", [C_SYNC_SOURCE] = "SyncSource", [C_SYNC_TARGET] = "SyncTarget", [C_PAUSED_SYNC_S] = "PausedSyncS", [C_PAUSED_SYNC_T] = "PausedSyncT", [C_VERIFY_S] = "VerifyS", [C_VERIFY_T] = "VerifyT", [C_AHEAD] = "Ahead", [C_BEHIND] = "Behind", }; static const char * const drbd_role_s_names[] = { [R_PRIMARY] = "Primary", [R_SECONDARY] = "Secondary", [R_UNKNOWN] = "Unknown" }; static const char * const drbd_disk_s_names[] = { [D_DISKLESS] = "Diskless", [D_ATTACHING] = "Attaching", [D_FAILED] = "Failed", [D_NEGOTIATING] = "Negotiating", [D_INCONSISTENT] = "Inconsistent", [D_OUTDATED] = "Outdated", [D_UNKNOWN] = "DUnknown", [D_CONSISTENT] = "Consistent", [D_UP_TO_DATE] = "UpToDate", }; static const char * const drbd_state_sw_errors[] = { [-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config", [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data", [-SS_NO_LOCAL_DISK] = "Can not resync without local disk", [-SS_NO_REMOTE_DISK] = "Can not resync without remote disk", [-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected", 
[-SS_PRIMARY_NOP] = "Refusing to be Primary while peer is not outdated", [-SS_RESYNC_RUNNING] = "Can not start OV/resync since it is already active", [-SS_ALREADY_STANDALONE] = "Can not disconnect a StandAlone device", [-SS_CW_FAILED_BY_PEER] = "State change was refused by peer node", [-SS_IS_DISKLESS] = "Device is diskless, the requested operation requires a disk", [-SS_DEVICE_IN_USE] = "Device is held open by someone", [-SS_NO_NET_CONFIG] = "Have no net/connection configuration", [-SS_NO_VERIFY_ALG] = "Need a verify algorithm to start online verify", [-SS_NEED_CONNECTION] = "Need a connection to start verify or resync", [-SS_NOT_SUPPORTED] = "Peer does not support protocol", [-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated", [-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change", [-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted", [-SS_OUTDATE_WO_CONN] = "Need a connection for a graceful disconnect/outdate peer", [-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config", }; const char *drbd_conn_str(enum drbd_conns s) { /* enums are unsigned... */ return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s]; } const char *drbd_role_str(enum drbd_role s) { return s > R_SECONDARY ? "TOO_LARGE" : drbd_role_s_names[s]; } const char *drbd_disk_str(enum drbd_disk_state s) { return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; } const char *drbd_set_st_err_str(enum drbd_state_rv err) { return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : err > SS_TWO_PRIMARIES ? "TOO_LARGE" : drbd_state_sw_errors[-err]; }
CSE3320/kernel-code
linux-5.8/drivers/block/drbd/drbd_strings.c
C
gpl-2.0
3,866
// Type definitions for lodash.findLast 4.6 // Project: http://lodash.com/ // Definitions by: Brian Zengel <https://github.com/bczengel>, Ilya Mochalov <https://github.com/chrootsu>, Stepan Mikhaylyuk <https://github.com/stepancar> // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped // TypeScript Version: 2.2 // Generated from https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/types/lodash/scripts/generate-modules.ts import { findLast } from "lodash"; export = findLast;
rolandzwaga/DefinitelyTyped
types/lodash.findlast/index.d.ts
TypeScript
mit
504
/* * OHCI HCD (Host Controller Driver) for USB. * * Copyright (C) 2004 SAN People (Pty) Ltd. * Copyright (C) 2005 Thibaut VARENE <[email protected]> * * AT91 Bus Glue * * Based on fragments of 2.4 driver by Rick Bronson. * Based on ohci-omap.c * * This file is licenced under the GPL. */ #include <linux/clk.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/gpio.h> #include <mach/board.h> #include <mach/cpu.h> #ifndef CONFIG_ARCH_AT91 #error "CONFIG_ARCH_AT91 must be defined." #endif /* interface and function clocks; sometimes also an AHB clock */ static struct clk *iclk, *fclk, *hclk; static int clocked; extern int usb_disabled(void); /*-------------------------------------------------------------------------*/ static void at91_start_clock(void) { if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_enable(hclk); clk_enable(iclk); clk_enable(fclk); clocked = 1; } static void at91_stop_clock(void) { clk_disable(fclk); clk_disable(iclk); if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_disable(hclk); clocked = 0; } static void at91_start_hc(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_regs __iomem *regs = hcd->regs; dev_dbg(&pdev->dev, "start\n"); /* * Start the USB clocks. */ at91_start_clock(); /* * The USB host controller must remain in reset. */ writel(0, &regs->control); } static void at91_stop_hc(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_regs __iomem *regs = hcd->regs; dev_dbg(&pdev->dev, "stop\n"); /* * Put the USB host controller into reset. */ writel(0, &regs->control); /* * Stop the USB clocks. 
*/ at91_stop_clock(); } /*-------------------------------------------------------------------------*/ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_at91_probe - initialize AT91-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. */ static int usb_hcd_at91_probe(const struct hc_driver *driver, struct platform_device *pdev) { int retval; struct usb_hcd *hcd = NULL; if (pdev->num_resources != 2) { pr_debug("hcd probe: invalid num_resources"); return -ENODEV; } if ((pdev->resource[0].flags != IORESOURCE_MEM) || (pdev->resource[1].flags != IORESOURCE_IRQ)) { pr_debug("hcd probe: invalid resource type\n"); return -ENODEV; } hcd = usb_create_hcd(driver, &pdev->dev, "at91"); if (!hcd) return -ENOMEM; hcd->rsrc_start = pdev->resource[0].start; hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_debug("request_mem_region failed\n"); retval = -EBUSY; goto err1; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { pr_debug("ioremap failed\n"); retval = -EIO; goto err2; } iclk = clk_get(&pdev->dev, "ohci_clk"); fclk = clk_get(&pdev->dev, "uhpck"); if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) hclk = clk_get(&pdev->dev, "hck0"); at91_start_hc(pdev); ohci_hcd_init(hcd_to_ohci(hcd)); retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); if (retval == 0) return retval; /* Error handling */ at91_stop_hc(pdev); if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); iounmap(hcd->regs); err2: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err1: usb_put_hcd(hcd); return retval; } /* may be called with 
controller, bus, and devices active */ /** * usb_hcd_at91_remove - shutdown processing for AT91-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_at91_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, "rmmod" or something similar. * */ static void usb_hcd_at91_remove(struct usb_hcd *hcd, struct platform_device *pdev) { usb_remove_hcd(hcd); at91_stop_hc(pdev); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); fclk = iclk = hclk = NULL; dev_set_drvdata(&pdev->dev, NULL); } /*-------------------------------------------------------------------------*/ static int __devinit ohci_at91_start (struct usb_hcd *hcd) { struct at91_usbh_data *board = hcd->self.controller->platform_data; struct ohci_hcd *ohci = hcd_to_ohci (hcd); int ret; if ((ret = ohci_init(ohci)) < 0) return ret; ohci->num_ports = board->ports; if ((ret = ohci_run(ohci)) < 0) { err("can't start %s", hcd->self.bus_name); ohci_stop(hcd); return ret; } return 0; } /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_at91_hc_driver = { .description = hcd_name, .product_desc = "AT91 OHCI", .hcd_priv_size = sizeof(struct ohci_hcd), /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations */ .start = ohci_at91_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = 
ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) { struct at91_usbh_data *pdata = pdev->dev.platform_data; int i; if (pdata) { /* REVISIT make the driver support per-port power switching, * and also overcurrent detection. Here we assume the ports * are always powered while this driver is active, and use * active-low power switches. */ for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { if (pdata->vbus_pin[i] <= 0) continue; gpio_request(pdata->vbus_pin[i], "ohci_vbus"); gpio_direction_output(pdata->vbus_pin[i], 0); } } device_init_wakeup(&pdev->dev, 1); return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); } static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) { struct at91_usbh_data *pdata = pdev->dev.platform_data; int i; if (pdata) { for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { if (pdata->vbus_pin[i] <= 0) continue; gpio_direction_output(pdata->vbus_pin[i], 1); gpio_free(pdata->vbus_pin[i]); } } device_init_wakeup(&pdev->dev, 0); usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev); return 0; } #ifdef CONFIG_PM static int ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); if (device_may_wakeup(&pdev->dev)) enable_irq_wake(hcd->irq); /* * The integrated transceivers seem unable to notice disconnect, * reconnect, or wakeup without the 48 MHz clock active. so for * correctness, always discard connection state (using reset). * * REVISIT: some boards will be able to turn VBUS off... 
*/ if (at91_suspend_entering_slow_clock()) { ohci_usb_reset (ohci); at91_stop_clock(); } return 0; } static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); if (device_may_wakeup(&pdev->dev)) disable_irq_wake(hcd->irq); if (!clocked) at91_start_clock(); ohci_finish_controller_resume(hcd); return 0; } #else #define ohci_hcd_at91_drv_suspend NULL #define ohci_hcd_at91_drv_resume NULL #endif MODULE_ALIAS("platform:at91_ohci"); static struct platform_driver ohci_hcd_at91_driver = { .probe = ohci_hcd_at91_drv_probe, .remove = ohci_hcd_at91_drv_remove, .shutdown = usb_hcd_platform_shutdown, .suspend = ohci_hcd_at91_drv_suspend, .resume = ohci_hcd_at91_drv_resume, .driver = { .name = "at91_ohci", .owner = THIS_MODULE, }, };
EAVR/EV3.14
ev3sources/extra/linux-03.20.00.13/drivers/usb/host/ohci-at91.c
C
gpl-2.0
8,572
Ext.define('ExtThemeNeptune.panel.Table', { override: 'Ext.panel.Table', initComponent: function() { var me = this; if (!me.hasOwnProperty('bodyBorder') && !me.hideHeaders) { me.bodyBorder = true; } me.callParent(); } });
applifireAlgo/ZenClubApp
zenws/src/main/webapp/ext/packages/ext-theme-neptune/overrides/panel/Table.js
JavaScript
gpl-3.0
280
//===-- SystemZMCInstLower.cpp - Lower MachineInstr to MCInst -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "SystemZMCInstLower.h" #include "SystemZAsmPrinter.h" #include "llvm/IR/Mangler.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCStreamer.h" using namespace llvm; // Return the VK_* enumeration for MachineOperand target flags Flags. static MCSymbolRefExpr::VariantKind getVariantKind(unsigned Flags) { switch (Flags & SystemZII::MO_SYMBOL_MODIFIER) { case 0: return MCSymbolRefExpr::VK_None; case SystemZII::MO_GOT: return MCSymbolRefExpr::VK_GOT; case SystemZII::MO_INDNTPOFF: return MCSymbolRefExpr::VK_INDNTPOFF; } llvm_unreachable("Unrecognised MO_ACCESS_MODEL"); } SystemZMCInstLower::SystemZMCInstLower(MCContext &ctx, SystemZAsmPrinter &asmprinter) : Ctx(ctx), AsmPrinter(asmprinter) {} const MCExpr * SystemZMCInstLower::getExpr(const MachineOperand &MO, MCSymbolRefExpr::VariantKind Kind) const { const MCSymbol *Symbol; bool HasOffset = true; switch (MO.getType()) { case MachineOperand::MO_MachineBasicBlock: Symbol = MO.getMBB()->getSymbol(); HasOffset = false; break; case MachineOperand::MO_GlobalAddress: Symbol = AsmPrinter.getSymbol(MO.getGlobal()); break; case MachineOperand::MO_ExternalSymbol: Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName()); break; case MachineOperand::MO_JumpTableIndex: Symbol = AsmPrinter.GetJTISymbol(MO.getIndex()); HasOffset = false; break; case MachineOperand::MO_ConstantPoolIndex: Symbol = AsmPrinter.GetCPISymbol(MO.getIndex()); break; case MachineOperand::MO_BlockAddress: Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()); break; default: llvm_unreachable("unknown operand type"); } const MCExpr *Expr = MCSymbolRefExpr::create(Symbol, Kind, Ctx); if 
(HasOffset) if (int64_t Offset = MO.getOffset()) { const MCExpr *OffsetExpr = MCConstantExpr::create(Offset, Ctx); Expr = MCBinaryExpr::createAdd(Expr, OffsetExpr, Ctx); } return Expr; } MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const { switch (MO.getType()) { case MachineOperand::MO_Register: return MCOperand::createReg(MO.getReg()); case MachineOperand::MO_Immediate: return MCOperand::createImm(MO.getImm()); default: { MCSymbolRefExpr::VariantKind Kind = getVariantKind(MO.getTargetFlags()); return MCOperand::createExpr(getExpr(MO, Kind)); } } } void SystemZMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { const MachineOperand &MO = MI->getOperand(I); // Ignore all implicit register operands. if (!MO.isReg() || !MO.isImplicit()) OutMI.addOperand(lowerOperand(MO)); } }
vinutah/apps
tools/llvm/llvm_39/opt/lib/Target/SystemZ/SystemZMCInstLower.cpp
C++
gpl-3.0
3,210
<?php namespace Intervention\Image\Imagick\Commands; use \Intervention\Image\Imagick\Color; class TrimCommand extends \Intervention\Image\Commands\AbstractCommand { /** * Trims away parts of an image * * @param \Intervention\Image\Image $image * @return boolean */ public function execute($image) { $base = $this->argument(0)->type('string')->value(); $away = $this->argument(1)->value(); $tolerance = $this->argument(2)->type('numeric')->value(0); $feather = $this->argument(3)->type('numeric')->value(0); $width = $image->getWidth(); $height = $image->getHeight(); $checkTransparency = false; // define borders to trim away if (is_null($away)) { $away = array('top', 'right', 'bottom', 'left'); } elseif (is_string($away)) { $away = array($away); } // lower border names foreach ($away as $key => $value) { $away[$key] = strtolower($value); } // define base color position switch (strtolower($base)) { case 'transparent': case 'trans': $checkTransparency = true; $base_x = 0; $base_y = 0; break; case 'bottom-right': case 'right-bottom': $base_x = $width - 1; $base_y = $height - 1; break; default: case 'top-left': case 'left-top': $base_x = 0; $base_y = 0; break; } // pick base color if ($checkTransparency) { $base_color = new Color; // color will only be used to compare alpha channel } else { $base_color = $image->pickColor($base_x, $base_y, 'object'); } // trim on clone to get only coordinates $trimed = clone $image->getCore(); // add border to trim specific color $trimed->borderImage($base_color->getPixel(), 1, 1); // trim image $trimed->trimImage(65850 / 100 * $tolerance); // get coordinates of trim $imagePage = $trimed->getImagePage(); list($crop_x, $crop_y) = array($imagePage['x']-1, $imagePage['y']-1); // $trimed->setImagePage(0, 0, 0, 0); list($crop_width, $crop_height) = array($trimed->width, $trimed->height); // adjust settings if right should not be trimed if ( ! 
in_array('right', $away)) { $crop_width = $crop_width + ($width - ($width - $crop_x)); } // adjust settings if bottom should not be trimed if ( ! in_array('bottom', $away)) { $crop_height = $crop_height + ($height - ($height - $crop_y)); } // adjust settings if left should not be trimed if ( ! in_array('left', $away)) { $crop_width = $crop_width + $crop_x; $crop_x = 0; } // adjust settings if top should not be trimed if ( ! in_array('top', $away)) { $crop_height = $crop_height + $crop_y; $crop_y = 0; } // add feather $crop_width = min($width, ($crop_width + $feather * 2)); $crop_height = min($height, ($crop_height + $feather * 2)); $crop_x = max(0, ($crop_x - $feather)); $crop_y = max(0, ($crop_y - $feather)); // finally crop based on page $image->getCore()->cropImage($crop_width, $crop_height, $crop_x, $crop_y); $image->getCore()->setImagePage(0,0,0,0); $trimed->destroy(); return true; } }
CE-KMITL-CLOUD-2014/Foodshare
your-project-name/vendor/intervention/image/src/Intervention/Image/Imagick/Commands/TrimCommand.php
PHP
apache-2.0
3,670
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testing import "k8s.io/kubernetes/pkg/api/v1" type FakePodDeletionSafetyProvider struct{} func (f *FakePodDeletionSafetyProvider) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool { return true }
BradErz/kops
vendor/k8s.io/kubernetes/pkg/kubelet/status/testing/fake_pod_deletion_safety.go
GO
apache-2.0
793
/* Estonian translation for the jQuery Timepicker Addon */ /* Written by Karl Sutt ([email protected]) */ (function($) { $.timepicker.regional['et'] = { timeOnlyTitle: 'Vali aeg', timeText: 'Aeg', hourText: 'Tund', minuteText: 'Minut', secondText: 'Sekund', millisecText: 'Millisekundis', timezoneText: 'Ajavöönd', currentText: 'Praegu', closeText: 'Valmis', timeFormat: 'HH:mm', amNames: ['AM', 'A'], pmNames: ['PM', 'P'], isRTL: false }; $.timepicker.setDefaults($.timepicker.regional['et']); })(jQuery);
chinhlv91/wp_melano
wp-content/themes/wp_melano_3.8/coo-theme/meta-box/js/jqueryui/timepicker-i18n/jquery-ui-timepicker-et.js
JavaScript
gpl-2.0
533
dnl Intel Pentium-4 mpn_sqr_basecase -- square an mpn number. dnl Copyright 2001, 2002 Free Software Foundation, Inc. dnl dnl This file is part of the GNU MP Library. dnl dnl The GNU MP Library is free software; you can redistribute it and/or dnl modify it under the terms of the GNU Lesser General Public License as dnl published by the Free Software Foundation; either version 2.1 of the dnl License, or (at your option) any later version. dnl dnl The GNU MP Library is distributed in the hope that it will be useful, dnl but WITHOUT ANY WARRANTY; without even the implied warranty of dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU dnl Lesser General Public License for more details. dnl dnl You should have received a copy of the GNU Lesser General Public dnl License along with the GNU MP Library; see the file COPYING.LIB. If dnl not, write to the Free Software Foundation, Inc., 51 Franklin Street, dnl Fifth Floor, Boston, MA 02110-1301, USA. include(`../config.m4') C P4: approx 3.5 cycles per crossproduct, or 7 cycles per triangular C product, at around 30x30 limbs. C void mpn_sqr_basecase (mp_ptr dst, mp_srcptr src, mp_size_t size); C C The algorithm is basically the same as mpn/generic/sqr_basecase.c, but a C lot of function call overheads are avoided, especially when the size is C small. C C On small sizes there's only a small speedup over mpn_mul_basecase, C presumably branch mispredictions are a bigger fraction of the work done. C It's not clear how to help this. 
defframe(PARAM_SIZE,12) defframe(PARAM_SRC, 8) defframe(PARAM_DST, 4) TEXT ALIGN(8) PROLOGUE(mpn_sqr_basecase) deflit(`FRAME',0) movl PARAM_SIZE, %edx movl PARAM_SRC, %eax movl PARAM_DST, %ecx cmpl $2, %edx je L(two_limbs) ja L(three_or_more) C ----------------------------------------------------------------------------- C one limb only C eax src C ebx C ecx dst C edx movl (%eax), %eax mull %eax movl %eax, (%ecx) movl %edx, 4(%ecx) ret C ----------------------------------------------------------------------------- L(two_limbs): C eax src C ebx C ecx dst C edx size movd (%eax), %mm1 movd 4(%eax), %mm0 pmuludq %mm1, %mm0 C src[0]*src[1] pmuludq %mm1, %mm1 C src[0]^2 movd 4(%eax), %mm2 pmuludq %mm2, %mm2 C src[1]^2 movd %mm1, (%ecx) C dst[0] psrlq $32, %mm1 pcmpeqd %mm3, %mm3 psrlq $32, %mm3 C 0x00000000FFFFFFFF pand %mm0, %mm3 C low(src[0]*src[1]) psrlq $32, %mm0 C high(src[0]*src[1]) psllq $1, %mm3 C 2*low(src[0]*src[1]) paddq %mm3, %mm1 C high(src[0]^2) movd %mm1, 4(%ecx) C dst[1] pcmpeqd %mm4, %mm4 psrlq $32, %mm4 C 0x00000000FFFFFFFF pand %mm2, %mm4 C low(src[1]^2) psrlq $32, %mm2 C high(src[1]^2) psllq $1, %mm0 C 2*high(src[0]*src[1]) psrlq $32, %mm1 C carry paddq %mm1, %mm0 paddq %mm4, %mm0 C low(src[1]^2) movd %mm0, 8(%ecx) C dst[2] psrlq $32, %mm0 C carry paddq %mm2, %mm0 C high(src[1]^2) movd %mm0, 12(%ecx) C dst[3] ASSERT(z,` psrlq $32, %mm0 movd %mm0, %eax orl %eax, %eax') emms ret C ----------------------------------------------------------------------------- L(three_or_more): C eax src C ebx C ecx dst C edx size C esi C edi C ebp C C First multiply src[0]*src[1..size-1] and store at dst[1..size]. 
defframe(SAVE_ESI, -4) defframe(SAVE_EDI, -8) defframe(SAVE_EBP, -12) deflit(STACK_SPACE, 12) subl $STACK_SPACE, %esp FRAME_subl_esp(STACK_SPACE) pxor %mm0, %mm0 C initial carry movd (%eax), %mm7 C multiplier movl %esi, SAVE_ESI movl %edi, SAVE_EDI movl %ebp, SAVE_EBP movl %eax, %esi movl %ecx, %edi subl $1, %edx C First multiply src[0]*src[1..size-1] and store at dst[1..size]. L(mul1): C eax src, incrementing C ebx C ecx dst, incrementing C edx counter, size-1 iterations C esi src C edi dst C ebp C C mm0 carry limb C mm7 multiplier movd 4(%eax), %mm1 addl $4, %eax pmuludq %mm7, %mm1 paddq %mm1, %mm0 movd %mm0, 4(%ecx) addl $4, %ecx psrlq $32, %mm0 subl $1, %edx jnz L(mul1) movl PARAM_SIZE, %ebp subl $3, %ebp jz L(corner) C Add products src[n]*src[n+1..size-1] at dst[2*n-1...], for C n=1..size-2. The last two products, which are the end corner of C the product triangle, are handled separately to save looping C overhead. L(outer): C eax C ebx C ecx C edx C esi src, incrementing C edi dst, incrementing C ebp size, decrementing C C mm0 prev carry movd 4(%esi), %mm7 C multiplier movd %mm0, 4(%ecx) C prev carry leal 8(%esi), %eax C next src addl $4, %esi leal 8(%edi), %ecx C next dst addl $8, %edi leal 1(%ebp), %edx C counter pxor %mm0, %mm0 C initial carry limb, clear carry flag L(inner): C eax src, incrementing C edx C ecx dst, incrementing C edx counter C esi outer src C edi outer dst C ebp outer size C C mm0 carry movd (%eax), %mm1 leal 4(%eax), %eax movd 4(%ecx),%mm2 pmuludq %mm7, %mm1 paddq %mm2, %mm1 paddq %mm1, %mm0 subl $1, %edx movd %mm0, 4(%ecx) psrlq $32, %mm0 leal 4(%ecx), %ecx jnz L(inner) subl $1, %ebp jnz L(outer) L(corner): C esi &src[size-3] C edi &dst[2*size-6] C mm0 carry C C +-----+-----+-- C | mm0 | dst so far C +-----+-----+-- C +-----+-----+ C | | | src[size-2]*src[size-1] C +-----+-----+ movd 4(%esi), %mm1 movd 8(%esi), %mm2 pmuludq %mm2, %mm1 C src[size-1]*src[size-2] movl PARAM_SRC, %eax movd (%eax), %mm2 pmuludq %mm2, %mm2 C src[0]^2 pcmpeqd 
%mm7, %mm7 psrlq $32, %mm7 movl PARAM_DST, %edx movd 4(%edx), %mm3 C dst[1] paddq %mm1, %mm0 movd %mm0, 12(%edi) C dst[2*size-3] psrlq $32, %mm0 movd %mm0, 16(%edi) C dst[2*size-2] movd %mm2, (%edx) C dst[0] psrlq $32, %mm2 psllq $1, %mm3 C 2*dst[1] paddq %mm3, %mm2 movd %mm2, 4(%edx) psrlq $32, %mm2 movl PARAM_SIZE, %ecx subl $2, %ecx C Now form squares on the diagonal src[0]^2,...,src[size-1]^2, and C add to the triangular parts dst[1..2*size-2] with those left C shifted by 1 bit. L(diag): C eax src, incrementing C ebx C ecx counter, size-2 iterations C edx dst, incrementing C esi C edi C ebp C C mm2 carry C mm7 0x00000000FFFFFFFF movd 4(%eax), %mm0 C src limb addl $4, %eax pmuludq %mm0, %mm0 movq %mm7, %mm1 pand %mm0, %mm1 C diagonal low psrlq $32, %mm0 C diagonal high movd 8(%edx), %mm3 psllq $1, %mm3 C 2*dst[i] paddq %mm3, %mm1 paddq %mm1, %mm2 movd %mm2, 8(%edx) psrlq $32, %mm2 movd 12(%edx), %mm3 psllq $1, %mm3 C 2*dst[i+1] paddq %mm3, %mm0 paddq %mm0, %mm2 movd %mm2, 12(%edx) addl $8, %edx psrlq $32, %mm2 subl $1, %ecx jnz L(diag) movd 4(%eax), %mm0 C src[size-1] pmuludq %mm0, %mm0 pand %mm0, %mm7 C diagonal low psrlq $32, %mm0 C diagonal high movd 8(%edx), %mm3 C dst[2*size-2] psllq $1, %mm3 paddq %mm3, %mm7 paddq %mm7, %mm2 movd %mm2, 8(%edx) psrlq $32, %mm2 paddq %mm0, %mm2 movd %mm2, 12(%edx) C dst[2*size-1] ASSERT(z,` C no further carry psrlq $32, %mm2 movd %mm2, %eax orl %eax, %eax') movl SAVE_ESI, %esi movl SAVE_EDI, %edi movl SAVE_EBP, %ebp addl $STACK_SPACE, %esp emms ret EPILOGUE()
DDTChen/CookieVLC
vlc/contrib/android/gmp/mpn/x86/pentium4/sse2/sqr_basecase.asm
Assembly
gpl-2.0
7,055
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef CORE_H #define CORE_H #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/firmware.h> #include <linux/sched.h> #include <linux/circ_buf.h> #include <net/cfg80211.h> #include "htc.h" #include "wmi.h" #include "bmi.h" #include "target.h" #define MAX_ATH6KL 1 #define ATH6KL_MAX_RX_BUFFERS 16 #define ATH6KL_BUFFER_SIZE 1664 #define ATH6KL_MAX_AMSDU_RX_BUFFERS 4 #define ATH6KL_AMSDU_REFILL_THRESHOLD 3 #define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128) #define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508 #define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46 #define USER_SAVEDKEYS_STAT_INIT 0 #define USER_SAVEDKEYS_STAT_RUN 1 #define ATH6KL_TX_TIMEOUT 10 #define ATH6KL_MAX_ENDPOINTS 4 #define MAX_NODE_NUM 15 #define ATH6KL_APSD_ALL_FRAME 0xFFFF #define ATH6KL_APSD_NUM_OF_AC 0x4 #define ATH6KL_APSD_FRAME_MASK 0xF /* Extra bytes for htc header alignment */ #define ATH6KL_HTC_ALIGN_BYTES 3 /* MAX_HI_COOKIE_NUM are reserved for high priority traffic */ #define MAX_DEF_COOKIE_NUM 180 #define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */ #define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM) #define 
MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC) #define DISCON_TIMER_INTVAL 10000 /* in msec */ /* Channel dwell time in fg scan */ #define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */ /* includes also the null byte */ #define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL" enum ath6kl_fw_ie_type { ATH6KL_FW_IE_FW_VERSION = 0, ATH6KL_FW_IE_TIMESTAMP = 1, ATH6KL_FW_IE_OTP_IMAGE = 2, ATH6KL_FW_IE_FW_IMAGE = 3, ATH6KL_FW_IE_PATCH_IMAGE = 4, ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5, ATH6KL_FW_IE_CAPABILITIES = 6, ATH6KL_FW_IE_PATCH_ADDR = 7, ATH6KL_FW_IE_BOARD_ADDR = 8, ATH6KL_FW_IE_VIF_MAX = 9, }; enum ath6kl_fw_capability { ATH6KL_FW_CAPABILITY_HOST_P2P = 0, ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1, /* * Firmware is capable of supporting P2P mgmt operations on a * station interface. After group formation, the station * interface will become a P2P client/GO interface as the case may be */ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, /* * Firmware has support to cleanup inactive stations * in AP mode. */ ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, /* Firmware has support to override rsn cap of rsn ie */ ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, /* * Multicast support in WOW and host awake mode. * Allow all multicast in host awake mode. * Apply multicast filter in WOW mode. 
*/ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, /* Firmware supports enhanced bmiss detection */ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, /* * FW supports matching of ssid in schedule scan */ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, /* Firmware supports filtering BSS results by RSSI */ ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, /* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */ ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, /* Firmware supports TX error rate notification */ ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, /* supports WMI_SET_REGDOMAIN_CMDID command */ ATH6KL_FW_CAPABILITY_REGDOMAIN, /* Firmware supports sched scan decoupled from host sleep */ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, /* * Firmware capability for hang detection through heart beat * challenge messages. */ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, /* this needs to be last */ ATH6KL_FW_CAPABILITY_MAX, }; #define ATH6KL_CAPABILITY_LEN (ALIGN(ATH6KL_FW_CAPABILITY_MAX, 32) / 32) struct ath6kl_fw_ie { __le32 id; __le32 len; u8 data[0]; }; enum ath6kl_hw_flags { ATH6KL_HW_64BIT_RATES = BIT(0), ATH6KL_HW_AP_INACTIVITY_MINS = BIT(1), ATH6KL_HW_MAP_LP_ENDPOINT = BIT(2), ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3), }; #define ATH6KL_FW_API2_FILE "fw-2.bin" #define ATH6KL_FW_API3_FILE "fw-3.bin" #define ATH6KL_FW_API4_FILE "fw-4.bin" /* AR6003 1.0 definitions */ #define AR6003_HW_1_0_VERSION 0x300002ba /* AR6003 2.0 definitions */ #define AR6003_HW_2_0_VERSION 0x30000384 #define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910 #define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0" #define AR6003_HW_2_0_OTP_FILE "otp.bin.z77" #define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77" #define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin" #define AR6003_HW_2_0_PATCH_FILE "data.patch.bin" #define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "/bdata.bin" #define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \ AR6003_HW_2_0_FW_DIR "/bdata.SD31.bin" /* AR6003 3.0 definitions */ #define AR6003_HW_2_1_1_VERSION 0x30000582 #define 
AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1" #define AR6003_HW_2_1_1_OTP_FILE "otp.bin" #define AR6003_HW_2_1_1_FIRMWARE_FILE "athwlan.bin" #define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "athtcmd_ram.bin" #define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin" #define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin" #define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin" #define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "/bdata.bin" #define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \ AR6003_HW_2_1_1_FW_DIR "/bdata.SD31.bin" /* AR6004 1.0 definitions */ #define AR6004_HW_1_0_VERSION 0x30000623 #define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0" #define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin" #define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "/bdata.bin" #define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \ AR6004_HW_1_0_FW_DIR "/bdata.DB132.bin" /* AR6004 1.1 definitions */ #define AR6004_HW_1_1_VERSION 0x30000001 #define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1" #define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin" #define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "/bdata.bin" #define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \ AR6004_HW_1_1_FW_DIR "/bdata.DB132.bin" /* AR6004 1.2 definitions */ #define AR6004_HW_1_2_VERSION 0x300007e8 #define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2" #define AR6004_HW_1_2_FIRMWARE_FILE "fw.ram.bin" #define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "/bdata.bin" #define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \ AR6004_HW_1_2_FW_DIR "/bdata.bin" /* AR6004 1.3 definitions */ #define AR6004_HW_1_3_VERSION 0x31c8088a #define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3" #define AR6004_HW_1_3_FIRMWARE_FILE "fw.ram.bin" #define AR6004_HW_1_3_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin" #define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin" /* Per STA data, used in AP mode */ #define STA_PS_AWAKE BIT(0) #define STA_PS_SLEEP BIT(1) #define STA_PS_POLLED BIT(2) #define STA_PS_APSD_TRIGGER BIT(3) #define 
STA_PS_APSD_EOSP BIT(4) /* HTC TX packet tagging definitions */ #define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED #define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1) #define AR6003_CUST_DATA_SIZE 16 #define AGGR_WIN_IDX(x, y) ((x) % (y)) #define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y)) #define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y)) #define ATH6KL_MAX_SEQ_NO 0xFFF #define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO) #define NUM_OF_TIDS 8 #define AGGR_SZ_DEFAULT 8 #define AGGR_WIN_SZ_MIN 2 #define AGGR_WIN_SZ_MAX 8 #define TID_WINDOW_SZ(_x) ((_x) << 1) #define AGGR_NUM_OF_FREE_NETBUFS 16 #define AGGR_RX_TIMEOUT 100 /* in ms */ #define WMI_TIMEOUT (2 * HZ) #define MBOX_YIELD_LIMIT 99 #define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */ #define ATH6KL_DEFAULT_BMISS_TIME 1500 #define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */ #define ATH6KL_MAX_BMISS_TIME 5000 /* configuration lags */ /* * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the barker premable in * ERP IE of beacon to determine the short premable support when * sending (Re)Assoc req. * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send the power * module state transition failure events which happen during * scan, to the host. */ #define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0) #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1) #define ATH6KL_CONF_ENABLE_11N BIT(2) #define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) #define ATH6KL_CONF_UART_DEBUG BIT(4) #define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */ enum wlan_low_pwr_state { WLAN_POWER_STATE_ON, WLAN_POWER_STATE_CUT_PWR, WLAN_POWER_STATE_DEEP_SLEEP, WLAN_POWER_STATE_WOW }; enum sme_state { SME_DISCONNECTED, SME_CONNECTING, SME_CONNECTED }; struct skb_hold_q { struct sk_buff *skb; bool is_amsdu; u16 seq_no; }; struct rxtid { bool aggr; bool timer_mon; u16 win_sz; u16 seq_next; u32 hold_q_sz; struct skb_hold_q *hold_q; struct sk_buff_head q; /* * lock mainly protects seq_next and hold_q. 
Movement of seq_next * needs to be protected between aggr_timeout() and * aggr_process_recv_frm(). hold_q will be holding the pending * reorder frames and it's access should also be protected. * Some of the other fields like hold_q_sz, win_sz and aggr are * initialized/reset when receiving addba/delba req, also while * deleting aggr state all the pending buffers are flushed before * resetting these fields, so there should not be any race in accessing * these fields. */ spinlock_t lock; }; struct rxtid_stats { u32 num_into_aggr; u32 num_dups; u32 num_oow; u32 num_mpdu; u32 num_amsdu; u32 num_delivered; u32 num_timeouts; u32 num_hole; u32 num_bar; }; struct aggr_info_conn { u8 aggr_sz; u8 timer_scheduled; struct timer_list timer; struct net_device *dev; struct rxtid rx_tid[NUM_OF_TIDS]; struct rxtid_stats stat[NUM_OF_TIDS]; struct aggr_info *aggr_info; }; struct aggr_info { struct aggr_info_conn *aggr_conn; struct sk_buff_head rx_amsdu_freeq; }; struct ath6kl_wep_key { u8 key_index; u8 key_len; u8 key[64]; }; #define ATH6KL_KEY_SEQ_LEN 8 struct ath6kl_key { u8 key[WLAN_MAX_KEY_LEN]; u8 key_len; u8 seq[ATH6KL_KEY_SEQ_LEN]; u8 seq_len; u32 cipher; }; struct ath6kl_node_mapping { u8 mac_addr[ETH_ALEN]; u8 ep_id; u8 tx_pend; }; struct ath6kl_cookie { struct sk_buff *skb; u32 map_no; struct htc_packet htc_pkt; struct ath6kl_cookie *arc_list_next; }; struct ath6kl_mgmt_buff { struct list_head list; u32 freq; u32 wait; u32 id; bool no_cck; size_t len; u8 buf[0]; }; struct ath6kl_sta { u16 sta_flags; u8 mac[ETH_ALEN]; u8 aid; u8 keymgmt; u8 ucipher; u8 auth; u8 wpa_ie[ATH6KL_MAX_IE]; struct sk_buff_head psq; /* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */ spinlock_t psq_lock; struct list_head mgmt_psq; size_t mgmt_psq_len; u8 apsd_info; struct sk_buff_head apsdq; struct aggr_info_conn *aggr_conn; }; struct ath6kl_version { u32 target_ver; u32 wlan_ver; u32 abi_ver; }; struct ath6kl_bmi { u32 cmd_credits; bool done_sent; u8 *cmd_buf; u32 max_data_size; u32 
max_cmd_size; }; struct target_stats { u64 tx_pkt; u64 tx_byte; u64 tx_ucast_pkt; u64 tx_ucast_byte; u64 tx_mcast_pkt; u64 tx_mcast_byte; u64 tx_bcast_pkt; u64 tx_bcast_byte; u64 tx_rts_success_cnt; u64 tx_pkt_per_ac[4]; u64 tx_err; u64 tx_fail_cnt; u64 tx_retry_cnt; u64 tx_mult_retry_cnt; u64 tx_rts_fail_cnt; u64 rx_pkt; u64 rx_byte; u64 rx_ucast_pkt; u64 rx_ucast_byte; u64 rx_mcast_pkt; u64 rx_mcast_byte; u64 rx_bcast_pkt; u64 rx_bcast_byte; u64 rx_frgment_pkt; u64 rx_err; u64 rx_crc_err; u64 rx_key_cache_miss; u64 rx_decrypt_err; u64 rx_dupl_frame; u64 tkip_local_mic_fail; u64 tkip_cnter_measures_invoked; u64 tkip_replays; u64 tkip_fmt_err; u64 ccmp_fmt_err; u64 ccmp_replays; u64 pwr_save_fail_cnt; u64 cs_bmiss_cnt; u64 cs_low_rssi_cnt; u64 cs_connect_cnt; u64 cs_discon_cnt; s32 tx_ucast_rate; s32 rx_ucast_rate; u32 lq_val; u32 wow_pkt_dropped; u16 wow_evt_discarded; s16 noise_floor_calib; s16 cs_rssi; s16 cs_ave_beacon_rssi; u8 cs_ave_beacon_snr; u8 cs_last_roam_msec; u8 cs_snr; u8 wow_host_pkt_wakeups; u8 wow_host_evt_wakeups; u32 arp_received; u32 arp_matched; u32 arp_replied; }; struct ath6kl_mbox_info { u32 htc_addr; u32 htc_ext_addr; u32 htc_ext_sz; u32 block_size; u32 gmbox_addr; u32 gmbox_sz; }; /* * 802.11i defines an extended IV for use with non-WEP ciphers. * When the EXTIV bit is set in the key id byte an additional * 4 bytes immediately follow the IV for TKIP. For CCMP the * EXTIV bit is likewise set but the 8 bytes represent the * CCMP header rather than IV+extended-IV. 
*/ #define ATH6KL_KEYBUF_SIZE 16 #define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */ #define ATH6KL_KEY_XMIT 0x01 #define ATH6KL_KEY_RECV 0x02 #define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */ /* Initial group key for AP mode */ struct ath6kl_req_key { bool valid; u8 key_index; int key_type; u8 key[WLAN_MAX_KEY_LEN]; u8 key_len; }; enum ath6kl_hif_type { ATH6KL_HIF_TYPE_SDIO, ATH6KL_HIF_TYPE_USB, }; enum ath6kl_htc_type { ATH6KL_HTC_TYPE_MBOX, ATH6KL_HTC_TYPE_PIPE, }; /* Max number of filters that hw supports */ #define ATH6K_MAX_MC_FILTERS_PER_LIST 7 struct ath6kl_mc_filter { struct list_head list; char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; }; struct ath6kl_htcap { bool ht_enable; u8 ampdu_factor; unsigned short cap_info; }; /* * Driver's maximum limit, note that some firmwares support only one vif * and the runtime (current) limit must be checked from ar->vif_max. */ #define ATH6KL_VIF_MAX 3 /* vif flags info */ enum ath6kl_vif_state { CONNECTED, CONNECT_PEND, WMM_ENABLED, NETQ_STOPPED, DTIM_EXPIRED, NETDEV_REGISTERED, CLEAR_BSSFILTER_ON_BEACON, DTIM_PERIOD_AVAIL, WLAN_ENABLED, STATS_UPDATE_PEND, HOST_SLEEP_MODE_CMD_PROCESSED, NETDEV_MCAST_ALL_ON, NETDEV_MCAST_ALL_OFF, SCHED_SCANNING, }; struct ath6kl_vif { struct list_head list; struct wireless_dev wdev; struct net_device *ndev; struct ath6kl *ar; /* Lock to protect vif specific net_stats and flags */ spinlock_t if_lock; u8 fw_vif_idx; unsigned long flags; int ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 dot11_auth_mode; u8 auth_mode; u8 prwise_crypto; u8 prwise_crypto_len; u8 grp_crypto; u8 grp_crypto_len; u8 def_txkey_index; u8 next_mode; u8 nw_type; u8 bssid[ETH_ALEN]; u8 req_bssid[ETH_ALEN]; u16 ch_hint; u16 bss_ch; struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; struct aggr_info *aggr_cntxt; struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS]; struct timer_list disconnect_timer; struct timer_list sched_scan_timer; struct 
cfg80211_scan_request *scan_req; enum sme_state sme_state; int reconnect_flag; u32 last_roc_id; u32 last_cancel_roc_id; u32 send_action_id; bool probe_req_report; u16 assoc_bss_beacon_int; u16 listen_intvl_t; u16 bmiss_time_t; u32 txe_intvl; u16 bg_scan_period; u8 assoc_bss_dtim_period; struct net_device_stats net_stats; struct target_stats target_stats; struct wmi_connect_cmd profile; u16 rsn_capab; struct list_head mc_filter; }; static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev) { return container_of(wdev, struct ath6kl_vif, wdev); } #define WOW_LIST_ID 0 #define WOW_HOST_REQ_DELAY 500 /* ms */ #define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */ /* Flag info */ enum ath6kl_dev_state { WMI_ENABLED, WMI_READY, WMI_CTRL_EP_FULL, TESTMODE, DESTROY_IN_PROGRESS, SKIP_SCAN, ROAM_TBL_PEND, FIRST_BOOT, RECOVERY_CLEANUP, }; enum ath6kl_state { ATH6KL_STATE_OFF, ATH6KL_STATE_ON, ATH6KL_STATE_SUSPENDING, ATH6KL_STATE_RESUMING, ATH6KL_STATE_DEEPSLEEP, ATH6KL_STATE_CUTPOWER, ATH6KL_STATE_WOW, ATH6KL_STATE_RECOVERY, }; /* Fw error recovery */ #define ATH6KL_HB_RESP_MISS_THRES 5 enum ath6kl_fw_err { ATH6KL_FW_ASSERT, ATH6KL_FW_HB_RESP_FAILURE, ATH6KL_FW_EP_FULL, }; struct ath6kl { struct device *dev; struct wiphy *wiphy; enum ath6kl_state state; unsigned int testmode; struct ath6kl_bmi bmi; const struct ath6kl_hif_ops *hif_ops; const struct ath6kl_htc_ops *htc_ops; struct wmi *wmi; int tx_pending[ENDPOINT_MAX]; int total_tx_data_pend; struct htc_target *htc_target; enum ath6kl_hif_type hif_type; void *hif_priv; struct list_head vif_list; /* Lock to avoid race in vif_list entries among add/del/traverse */ spinlock_t list_lock; u8 num_vif; unsigned int vif_max; u8 max_norm_iface; u8 avail_idx_map; /* * Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie() * calls, tx_pending and total_tx_data_pend. 
*/ spinlock_t lock; struct semaphore sem; u8 lrssi_roam_threshold; struct ath6kl_version version; u32 target_type; u8 tx_pwr; struct ath6kl_node_mapping node_map[MAX_NODE_NUM]; u8 ibss_ps_enable; bool ibss_if_active; u8 node_num; u8 next_ep_id; struct ath6kl_cookie *cookie_list; u32 cookie_count; enum htc_endpoint_id ac2ep_map[WMM_NUM_AC]; bool ac_stream_active[WMM_NUM_AC]; u8 ac_stream_pri_map[WMM_NUM_AC]; u8 hiac_stream_active_pri; u8 ep2ac_map[ENDPOINT_MAX]; enum htc_endpoint_id ctrl_ep; struct ath6kl_htc_credit_info credit_state_info; u32 connect_ctrl_flags; u32 user_key_ctrl; u8 usr_bss_filter; struct ath6kl_sta sta_list[AP_MAX_NUM_STA]; u8 sta_list_index; struct ath6kl_req_key ap_mode_bkey; struct sk_buff_head mcastpsq; u32 want_ch_switch; u16 last_ch; /* * FIXME: protects access to mcastpsq but is actually useless as * all skbe_queue_*() functions provide serialisation themselves */ spinlock_t mcastpsq_lock; u8 intra_bss; struct wmi_ap_mode_stat ap_stats; u8 ap_country_code[3]; struct list_head amsdu_rx_buffer_queue; u8 rx_meta_ver; enum wlan_low_pwr_state wlan_pwr_state; u8 mac_addr[ETH_ALEN]; #define AR_MCAST_FILTER_MAC_ADDR_SIZE 4 struct { void *rx_report; size_t rx_report_len; } tm; struct ath6kl_hw { u32 id; const char *name; u32 dataset_patch_addr; u32 app_load_addr; u32 app_start_override_addr; u32 board_ext_data_addr; u32 reserved_ram_size; u32 board_addr; u32 refclk_hz; u32 uarttx_pin; u32 testscript_addr; enum wmi_phy_cap cap; u32 flags; struct ath6kl_hw_fw { const char *dir; const char *otp; const char *fw; const char *tcmd; const char *patch; const char *utf; const char *testscript; } fw; const char *fw_board; const char *fw_default_board; } hw; u16 conf_flags; u16 suspend_mode; u16 wow_suspend_mode; wait_queue_head_t event_wq; struct ath6kl_mbox_info mbox_info; struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM]; unsigned long flag; u8 *fw_board; size_t fw_board_len; u8 *fw_otp; size_t fw_otp_len; u8 *fw; size_t fw_len; u8 *fw_patch; size_t 
fw_patch_len; u8 *fw_testscript; size_t fw_testscript_len; unsigned int fw_api; unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN]; struct workqueue_struct *ath6kl_wq; struct dentry *debugfs_phy; bool p2p; bool wiphy_registered; struct ath6kl_fw_recovery { struct work_struct recovery_work; unsigned long err_reason; unsigned long hb_poll; struct timer_list hb_timer; u32 seq_num; bool hb_pending; u8 hb_misscnt; bool enable; } fw_recovery; #ifdef CONFIG_ATH6KL_DEBUG struct { struct sk_buff_head fwlog_queue; struct completion fwlog_completion; bool fwlog_open; u32 fwlog_mask; unsigned int dbgfs_diag_reg; u32 diag_reg_addr_wr; u32 diag_reg_val_wr; struct { unsigned int invalid_rate; } war_stats; u8 *roam_tbl; unsigned int roam_tbl_len; u8 keepalive; u8 disc_timeout; } debug; #endif /* CONFIG_ATH6KL_DEBUG */ }; static inline struct ath6kl *ath6kl_priv(struct net_device *dev) { return ((struct ath6kl_vif *) netdev_priv(dev))->ar; } static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, u32 item_offset) { u32 addr = 0; if (ar->target_type == TARGET_TYPE_AR6003) addr = ATH6KL_AR6003_HI_START_ADDR + item_offset; else if (ar->target_type == TARGET_TYPE_AR6004) addr = ATH6KL_AR6004_HI_START_ADDR + item_offset; return addr; } int ath6kl_configure_target(struct ath6kl *ar); void ath6kl_detect_error(unsigned long ptr); void disconnect_timer_handler(unsigned long ptr); void init_netdev(struct net_device *dev); void ath6kl_cookie_init(struct ath6kl *ar); void ath6kl_cookie_cleanup(struct ath6kl *ar); void ath6kl_rx(struct htc_target *target, struct htc_packet *packet); void ath6kl_tx_complete(struct htc_target *context, struct list_head *packet_queue); enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, struct htc_packet *packet); void ath6kl_stop_txrx(struct ath6kl *ar); void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar); int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value); int ath6kl_diag_write(struct ath6kl *ar, u32 address, void 
*data, u32 length); int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value); int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length); int ath6kl_read_fwlogs(struct ath6kl *ar); void ath6kl_init_profile_info(struct ath6kl_vif *vif); void ath6kl_tx_data_cleanup(struct ath6kl *ar); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); struct aggr_info *aggr_init(struct ath6kl_vif *vif); void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, struct aggr_info_conn *aggr_conn); void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint); void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count); struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target, enum htc_endpoint_id endpoint, int len); void aggr_module_destroy(struct aggr_info *aggr_info); void aggr_reset_state(struct aggr_info_conn *aggr_conn); struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr); struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid); void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver, enum wmi_phy_cap cap); int ath6kl_control_tx(void *devt, struct sk_buff *skb, enum htc_endpoint_id eid); void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid, u16 listen_int, u16 beacon_int, enum network_type net_type, u8 beacon_ie_len, u8 assoc_req_len, u8 assoc_resp_len, u8 *assoc_info); void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel); void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, u8 keymgmt, u8 ucipher, u8 auth, u8 assoc_req_len, u8 *assoc_info, u8 apsd_info); void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid, u8 assoc_resp_len, u8 *assoc_info, u16 prot_reason_status); void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, 
bool ismcast); void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr); void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status); void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len); void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active); enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac); void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid); void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif); void ath6kl_disconnect(struct ath6kl_vif *vif); void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid); void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no, u8 win_sz); void ath6kl_wakeup_event(void *dev); void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, bool wait_fot_compltn, bool cold_reset); void ath6kl_init_control_info(struct ath6kl_vif *vif); struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready); int ath6kl_init_hw_start(struct ath6kl *ar); int ath6kl_init_hw_stop(struct ath6kl *ar); int ath6kl_init_fetch_firmwares(struct ath6kl *ar); int ath6kl_init_hw_params(struct ath6kl *ar); void ath6kl_check_wow_status(struct ath6kl *ar); void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb); void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe); struct ath6kl *ath6kl_core_create(struct device *dev); int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type); void ath6kl_core_cleanup(struct ath6kl *ar); void ath6kl_core_destroy(struct ath6kl *ar); /* Fw error recovery */ void ath6kl_init_hw_restart(struct ath6kl *ar); void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason); void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie); void ath6kl_recovery_init(struct ath6kl *ar); void ath6kl_recovery_cleanup(struct ath6kl *ar); void ath6kl_recovery_suspend(struct ath6kl *ar); void ath6kl_recovery_resume(struct ath6kl *ar); #endif /* CORE_H */
dinh-linux/linux-socfpga
drivers/net/wireless/ath/ath6kl/core.h
C
gpl-2.0
25,799
/* * os-win32.c * * Copyright (c) 2003-2008 Fabrice Bellard * Copyright (c) 2010 Red Hat, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <windows.h> #include <unistd.h> #include <fcntl.h> #include <signal.h> #include <time.h> #include <errno.h> #include <sys/time.h> #include "config-host.h" #include "sysemu.h" #include "qemu-options.h" /***********************************************************/ /* Functions missing in mingw */ int setenv(const char *name, const char *value, int overwrite) { int result = 0; if (overwrite || !getenv(name)) { size_t length = strlen(name) + strlen(value) + 2; char *string = g_malloc(length); snprintf(string, length, "%s=%s", name, value); result = putenv(string); /* Windows takes a copy and does not continue to use our string. * Therefore it can be safely freed on this platform. POSIX code * typically has to leak the string because according to the spec it * becomes part of the environment. 
*/ g_free(string); } return result; } static BOOL WINAPI qemu_ctrl_handler(DWORD type) { exit(STATUS_CONTROL_C_EXIT); return TRUE; } void os_setup_early_signal_handling(void) { /* Note: cpu_interrupt() is currently not SMP safe, so we force QEMU to run on a single CPU */ HANDLE h; DWORD_PTR mask, smask; int i; SetConsoleCtrlHandler(qemu_ctrl_handler, TRUE); h = GetCurrentProcess(); if (GetProcessAffinityMask(h, &mask, &smask)) { for(i = 0; i < 32; i++) { if (mask & (1 << i)) break; } if (i != 32) { mask = 1 << i; SetProcessAffinityMask(h, mask); } } } /* Look for support files in the same directory as the executable. */ char *os_find_datadir(const char *argv0) { char *p; char buf[MAX_PATH]; DWORD len; len = GetModuleFileName(NULL, buf, sizeof(buf) - 1); if (len == 0) { return NULL; } buf[len] = 0; p = buf + len - 1; while (p != buf && *p != '\\') p--; *p = 0; if (access(buf, R_OK) == 0) { return g_strdup(buf); } return NULL; } void os_set_line_buffering(void) { setbuf(stdout, NULL); setbuf(stderr, NULL); } /* * Parse OS specific command line options. * return 0 if option handled, -1 otherwise */ void os_parse_cmd_args(int index, const char *optarg) { return; } void os_pidfile_error(void) { fprintf(stderr, "Could not acquire pid file: %s\n", strerror(errno)); } int qemu_create_pidfile(const char *filename) { char buffer[128]; int len; HANDLE file; OVERLAPPED overlap; BOOL ret; memset(&overlap, 0, sizeof(overlap)); file = CreateFile(filename, GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (file == INVALID_HANDLE_VALUE) { return -1; } len = snprintf(buffer, sizeof(buffer), "%d\n", getpid()); ret = WriteFile(file, (LPCVOID)buffer, (DWORD)len, NULL, &overlap); CloseHandle(file); if (ret == 0) { return -1; } return 0; }
marsleezm/qemu
os-win32.c
C
gpl-2.0
4,229
/* * SWIOTLB-based DMA API implementation * * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/gfp.h> #include <linux/acpi.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/genalloc.h> #include <linux/dma-mapping.h> #include <linux/dma-contiguous.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> #include <asm/cacheflush.h> static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, bool coherent) { if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) return pgprot_writecombine(prot); return prot; } static struct gen_pool *atomic_pool; #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE; static int __init early_coherent_pool(char *p) { atomic_pool_size = memparse(p, &p); return 0; } early_param("coherent_pool", early_coherent_pool); static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; if (!atomic_pool) { WARN(1, "coherent pool not initialised!\n"); return NULL; } val = gen_pool_alloc(atomic_pool, size); if (val) { phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); *ret_page = phys_to_page(phys); ptr = (void *)val; memset(ptr, 0, size); } return ptr; } static bool __in_atomic_pool(void *start, size_t size) { return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); } static int 
__free_from_pool(void *start, size_t size) { if (!__in_atomic_pool(start, size)) return 0; gen_pool_free(atomic_pool, (unsigned long)start, size); return 1; } static void *__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { if (dev == NULL) { WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); return NULL; } if (IS_ENABLED(CONFIG_ZONE_DMA) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA; if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) { struct page *page; void *addr; page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, get_order(size)); if (!page) return NULL; *dma_handle = phys_to_dma(dev, page_to_phys(page)); addr = page_address(page); memset(addr, 0, size); return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } } static void __dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { bool freed; phys_addr_t paddr = dma_to_phys(dev, dma_handle); if (dev == NULL) { WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); return; } freed = dma_release_from_contiguous(dev, phys_to_page(paddr), size >> PAGE_SHIFT); if (!freed) swiotlb_free_coherent(dev, size, vaddr, dma_handle); } static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { struct page *page; void *ptr, *coherent_ptr; bool coherent = is_device_dma_coherent(dev); pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false); size = PAGE_ALIGN(size); if (!coherent && !gfpflags_allow_blocking(flags)) { struct page *page = NULL; void *addr = __alloc_from_pool(size, &page, flags); if (addr) *dma_handle = phys_to_dma(dev, page_to_phys(page)); return addr; } ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); if (!ptr) goto no_mem; /* no need for non-cacheable mapping if coherent */ if (coherent) return ptr; /* remove any 
dirty cache lines on the kernel alias */ __dma_flush_range(ptr, ptr + size); /* create a coherent mapping */ page = virt_to_page(ptr); coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot, NULL); if (!coherent_ptr) goto no_map; return coherent_ptr; no_map: __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); no_mem: *dma_handle = DMA_ERROR_CODE; return NULL; } static void __dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); size = PAGE_ALIGN(size); if (!is_device_dma_coherent(dev)) { if (__free_from_pool(vaddr, size)) return; vunmap(vaddr); } __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); } static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { dma_addr_t dev_addr; dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); if (!is_device_dma_coherent(dev)) __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); return dev_addr; } static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!is_device_dma_coherent(dev)) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); } static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i, ret; ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); if (!is_device_dma_coherent(dev)) for_each_sg(sgl, sg, ret, i) __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), sg->length, dir); return ret; } static void __swiotlb_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct 
scatterlist *sg; int i; if (!is_device_dma_coherent(dev)) for_each_sg(sgl, sg, nelems, i) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), sg->length, dir); swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs); } static void __swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { if (!is_device_dma_coherent(dev)) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); } static void __swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { swiotlb_sync_single_for_device(dev, dev_addr, size, dir); if (!is_device_dma_coherent(dev)) __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); } static void __swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; if (!is_device_dma_coherent(dev)) for_each_sg(sgl, sg, nelems, i) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), sg->length, dir); swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir); } static void __swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; swiotlb_sync_sg_for_device(dev, sgl, nelems, dir); if (!is_device_dma_coherent(dev)) for_each_sg(sgl, sg, nelems, i) __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), sg->length, dir); } static int __swiotlb_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { int ret = -ENXIO; unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, 
is_device_dma_coherent(dev)); if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { ret = remap_pfn_range(vma, vma->vm_start, pfn + off, vma->vm_end - vma->vm_start, vma->vm_page_prot); } return ret; } static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, struct dma_attrs *attrs) { int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (!ret) sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)), PAGE_ALIGN(size), 0); return ret; } static struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_alloc, .free = __dma_free, .mmap = __swiotlb_mmap, .get_sgtable = __swiotlb_get_sgtable, .map_page = __swiotlb_map_page, .unmap_page = __swiotlb_unmap_page, .map_sg = __swiotlb_map_sg_attrs, .unmap_sg = __swiotlb_unmap_sg_attrs, .sync_single_for_cpu = __swiotlb_sync_single_for_cpu, .sync_single_for_device = __swiotlb_sync_single_for_device, .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu, .sync_sg_for_device = __swiotlb_sync_sg_for_device, .dma_supported = swiotlb_dma_supported, .mapping_error = swiotlb_dma_mapping_error, }; static int __init atomic_pool_init(void) { pgprot_t prot = __pgprot(PROT_NORMAL_NC); unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; struct page *page; void *addr; unsigned int pool_size_order = get_order(atomic_pool_size); if (dev_get_cma_area(NULL)) page = dma_alloc_from_contiguous(NULL, nr_pages, pool_size_order); else page = alloc_pages(GFP_DMA, pool_size_order); if (page) { int ret; void *page_addr = page_address(page); memset(page_addr, 0, atomic_pool_size); __dma_flush_range(page_addr, page_addr + atomic_pool_size); atomic_pool = gen_pool_create(PAGE_SHIFT, -1); if (!atomic_pool) goto free_page; addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP, prot, atomic_pool_init); if (!addr) goto destroy_genpool; ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, page_to_phys(page), 
atomic_pool_size, -1); if (ret) goto remove_mapping; gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, (void *)PAGE_SHIFT); pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", atomic_pool_size / 1024); return 0; } goto out; remove_mapping: dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); destroy_genpool: gen_pool_destroy(atomic_pool); atomic_pool = NULL; free_page: if (!dma_release_from_contiguous(NULL, page, nr_pages)) __free_pages(page, pool_size_order); out: pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", atomic_pool_size / 1024); return -ENOMEM; } /******************************************** * The following APIs are for dummy DMA ops * ********************************************/ static void *__dummy_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { return NULL; } static void __dummy_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { } static int __dummy_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { return -ENXIO; } static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { return DMA_ERROR_CODE; } static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { } static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { return 0; } static void __dummy_unmap_sg(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { } static void __dummy_sync_single(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { } static void __dummy_sync_sg(struct device *dev, struct 
scatterlist *sgl, int nelems, enum dma_data_direction dir) { } static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return 1; } static int __dummy_dma_supported(struct device *hwdev, u64 mask) { return 0; } struct dma_map_ops dummy_dma_ops = { .alloc = __dummy_alloc, .free = __dummy_free, .mmap = __dummy_mmap, .map_page = __dummy_map_page, .unmap_page = __dummy_unmap_page, .map_sg = __dummy_map_sg, .unmap_sg = __dummy_unmap_sg, .sync_single_for_cpu = __dummy_sync_single, .sync_single_for_device = __dummy_sync_single, .sync_sg_for_cpu = __dummy_sync_sg, .sync_sg_for_device = __dummy_sync_sg, .mapping_error = __dummy_mapping_error, .dma_supported = __dummy_dma_supported, }; EXPORT_SYMBOL(dummy_dma_ops); static int __init arm64_dma_init(void) { return atomic_pool_init(); } arch_initcall(arm64_dma_init); #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_debug_do_init); #ifdef CONFIG_IOMMU_DMA #include <linux/dma-iommu.h> #include <linux/platform_device.h> #include <linux/amba/bus.h> /* Thankfully, all cache ops are by VA so we can ignore phys here */ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys) { __dma_flush_range(virt, virt + PAGE_SIZE); } static void *__iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { bool coherent = is_device_dma_coherent(dev); int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); size_t iosize = size; void *addr; if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) return NULL; size = PAGE_ALIGN(size); /* * Some drivers rely on this, and we probably don't want the * possibility of stale kernel data being read by devices anyway. 
*/ gfp |= __GFP_ZERO; if (gfpflags_allow_blocking(gfp)) { struct page **pages; pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle, flush_page); if (!pages) return NULL; addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, __builtin_return_address(0)); if (!addr) iommu_dma_free(dev, pages, iosize, handle); } else { struct page *page; /* * In atomic context we can't remap anything, so we'll only * get the virtually contiguous buffer we need by way of a * physically contiguous allocation. */ if (coherent) { page = alloc_pages(gfp, get_order(size)); addr = page ? page_address(page) : NULL; } else { addr = __alloc_from_pool(size, &page, gfp); } if (!addr) return NULL; *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); if (iommu_dma_mapping_error(dev, *handle)) { if (coherent) __free_pages(page, get_order(size)); else __free_from_pool(addr, size); addr = NULL; } } return addr; } static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { size_t iosize = size; size = PAGE_ALIGN(size); /* * @cpu_addr will be one of 3 things depending on how it was allocated: * - A remapped array of pages from iommu_dma_alloc(), for all * non-atomic allocations. * - A non-cacheable alias from the atomic pool, for atomic * allocations by non-coherent devices. * - A normal lowmem address, for atomic allocations by * coherent devices. * Hence how dodgy the below logic looks... 
*/ if (__in_atomic_pool(cpu_addr, size)) { iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_from_pool(cpu_addr, size); } else if (is_vmalloc_addr(cpu_addr)){ struct vm_struct *area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return; iommu_dma_free(dev, area->pages, iosize, &handle); dma_common_free_remap(cpu_addr, size, VM_USERMAP); } else { iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_pages(virt_to_page(cpu_addr), get_order(size)); } } static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { struct vm_struct *area; int ret; vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, is_device_dma_coherent(dev)); if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) return ret; area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return -ENXIO; return iommu_dma_mmap(area->pages, size, vma); } static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct vm_struct *area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return -ENXIO; return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size, GFP_KERNEL); } static void __iommu_sync_single_for_cpu(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { phys_addr_t phys; if (is_device_dma_coherent(dev)) return; phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); __dma_unmap_area(phys_to_virt(phys), size, dir); } static void __iommu_sync_single_for_device(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { phys_addr_t phys; if (is_device_dma_coherent(dev)) return; phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); __dma_map_area(phys_to_virt(phys), size, dir); } static dma_addr_t 
__iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { bool coherent = is_device_dma_coherent(dev); int prot = dma_direction_to_prot(dir, coherent); dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); if (!iommu_dma_mapping_error(dev, dev_addr) && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __iommu_sync_single_for_device(dev, dev_addr, size, dir); return dev_addr; } static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); } static void __iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; if (is_device_dma_coherent(dev)) return; for_each_sg(sgl, sg, nelems, i) __dma_unmap_area(sg_virt(sg), sg->length, dir); } static void __iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; if (is_device_dma_coherent(dev)) return; for_each_sg(sgl, sg, nelems, i) __dma_map_area(sg_virt(sg), sg->length, dir); } static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { bool coherent = is_device_dma_coherent(dev); if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __iommu_sync_sg_for_device(dev, sgl, nelems, dir); return iommu_dma_map_sg(dev, sgl, nelems, dma_direction_to_prot(dir, coherent)); } static void __iommu_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); iommu_dma_unmap_sg(dev, sgl, 
nelems, dir, attrs); } static struct dma_map_ops iommu_dma_ops = { .alloc = __iommu_alloc_attrs, .free = __iommu_free_attrs, .mmap = __iommu_mmap_attrs, .get_sgtable = __iommu_get_sgtable, .map_page = __iommu_map_page, .unmap_page = __iommu_unmap_page, .map_sg = __iommu_map_sg_attrs, .unmap_sg = __iommu_unmap_sg_attrs, .sync_single_for_cpu = __iommu_sync_single_for_cpu, .sync_single_for_device = __iommu_sync_single_for_device, .sync_sg_for_cpu = __iommu_sync_sg_for_cpu, .sync_sg_for_device = __iommu_sync_sg_for_device, .dma_supported = iommu_dma_supported, .mapping_error = iommu_dma_mapping_error, }; /* * TODO: Right now __iommu_setup_dma_ops() gets called too early to do * everything it needs to - the device is only partially created and the * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we * need this delayed attachment dance. Once IOMMU probe ordering is sorted * to move the arch_setup_dma_ops() call later, all the notifier bits below * become unnecessary, and will go away. */ struct iommu_dma_notifier_data { struct list_head list; struct device *dev; const struct iommu_ops *ops; u64 dma_base; u64 size; }; static LIST_HEAD(iommu_dma_masters); static DEFINE_MUTEX(iommu_dma_notifier_lock); /* * Temporarily "borrow" a domain feature flag to to tell if we had to resort * to creating our own domain here, in case we need to clean it up again. */ #define __IOMMU_DOMAIN_FAKE_DEFAULT (1U << 31) static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, u64 dma_base, u64 size) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); /* * Best case: The device is either part of a group which was * already attached to a domain in a previous call, or it's * been put in a default DMA domain by the IOMMU core. */ if (!domain) { /* * Urgh. 
The IOMMU core isn't going to do default domains
 * for non-PCI devices anyway, until it has some means of
 * abstracting the entirely implementation-specific
 * sideband data/SoC topology/unicorn dust that may or
 * may not differentiate upstream masters.
 * So until then, HORRIBLE HACKS!
 */
		domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
		if (!domain)
			goto out_no_domain;
		domain->ops = ops;
		/* Tag the domain so teardown knows we created it and must free it. */
		domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
		if (iommu_attach_device(domain, dev))
			goto out_put_domain;
	}

	if (iommu_dma_init_domain(domain, dma_base, size))
		goto out_detach;

	dev->archdata.dma_ops = &iommu_dma_ops;
	return true;

	/* Error unwinding: each label undoes progressively less setup. */
out_detach:
	iommu_detach_device(domain, dev);
out_put_domain:
	/* Only free domains we allocated ourselves (fake-default flag). */
	if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
		iommu_domain_free(domain);
out_no_domain:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
	return false;
}

/*
 * queue_iommu_attach - defer IOMMU attach for @dev until a later bus event.
 *
 * Records the device and its attach parameters on the iommu_dma_masters
 * list (under iommu_dma_notifier_lock) so __iommu_attach_notifier() can
 * retry do_iommu_attach() later.  On allocation failure it returns
 * silently and the device keeps its platform DMA ops.
 */
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

/*
 * Bus notifier callback: on each BUS_NOTIFY_ADD_DEVICE event, walk the
 * deferred-attach list and retry do_iommu_attach() for every queued
 * master.  Entries that attach successfully are unlinked and freed; the
 * rest remain queued for a later notification.
 */
static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (do_iommu_attach(master->dev, master->ops,
				master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

/*
 * Register the deferred-attach notifier on @bus.  Returns 0 or a negative
 * errno; on bus_register_notifier() failure the notifier block is freed
 * and devices on that bus keep their platform DMA ops (warning logged).
 */
static int register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;
	/*
	 * The device must be attached to a domain before the driver probe
	 * routine gets a chance to start allocating DMA buffers. However,
	 * the IOMMU driver also needs a chance to configure the iommu_group
	 * via its add_device callback first, so we need to make the attach
	 * happen between those two points. Since the IOMMU core uses a bus
	 * notifier with default priority for add_device, do the same but
	 * with a lower priority to ensure the appropriate ordering.
	 */
	nb->notifier_call = __iommu_attach_notifier;
	nb->priority = -100;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}

	return ret;
}

/*
 * Arch initcall: initialise the IOMMU-DMA layer, then hook the
 * deferred-attach notifier onto the platform and AMBA buses.
 */
static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
	return ret;
}
arch_initcall(__iommu_dma_init);

/*
 * Attach @dev to an IOMMU DMA domain immediately if its iommu_group
 * already exists; otherwise queue it for the bus notifier to pick up.
 */
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

/*
 * Undo arch_setup_dma_ops(): detach the device from its domain, free the
 * domain if this code created it (__IOMMU_DOMAIN_FAKE_DEFAULT), and reset
 * the device's DMA ops.
 */
void arch_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (domain) {
		iommu_detach_device(domain, dev);
		if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
			iommu_domain_free(domain);
	}

	dev->archdata.dma_ops = NULL;
}

#else

/* Stub when CONFIG_IOMMU_DMA is disabled: nothing to set up. */
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

/*
 * Install DMA ops for @dev: default to the swiotlb ops if none are set,
 * record cache coherency, then let the IOMMU path override the ops when
 * @iommu is non-NULL.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
AiJiaZone/linux-4.0
virt/arch/arm64/mm/dma-mapping.c
C
gpl-2.0
27,138
require 'date'
require 'active_support/inflector/methods'
require 'active_support/core_ext/date/zones'
require 'active_support/core_ext/module/remove_method'

class Date
  # Named formats for +to_formatted_s+: either a strftime pattern string
  # or a callable that receives the date and returns a string.
  DATE_FORMATS = {
    :short => "%e %b",
    :long => "%B %e, %Y",
    :db => "%Y-%m-%d",
    :number => "%Y%m%d",
    :long_ordinal => lambda { |date| date.strftime("%B #{ActiveSupport::Inflector.ordinalize(date.day)}, %Y") }, # => "April 25th, 2007"
    :rfc822 => "%e %b %Y"
  }

  # Ruby 1.9 has Date#to_time which converts to localtime only.
  remove_possible_method :to_time

  # Ruby 1.9 has Date#xmlschema which converts to a string without the time component.
  remove_possible_method :xmlschema

  # Convert to a formatted string. See DATE_FORMATS for predefined formats.
  #
  # This method is aliased to <tt>to_s</tt>.
  #
  # ==== Examples
  #   date = Date.new(2007, 11, 10)       # => Sat, 10 Nov 2007
  #
  #   date.to_formatted_s(:db)            # => "2007-11-10"
  #   date.to_s(:db)                      # => "2007-11-10"
  #
  #   date.to_formatted_s(:short)         # => "10 Nov"
  #   date.to_formatted_s(:long)          # => "November 10, 2007"
  #   date.to_formatted_s(:long_ordinal)  # => "November 10th, 2007"
  #   date.to_formatted_s(:rfc822)        # => "10 Nov 2007"
  #
  # == Adding your own time formats to to_formatted_s
  # You can add your own formats to the Date::DATE_FORMATS hash.
  # Use the format name as the hash key and either a strftime string
  # or Proc instance that takes a date argument as the value.
  #
  #   # config/initializers/time_formats.rb
  #   Date::DATE_FORMATS[:month_and_year] = "%B %Y"
  #   Date::DATE_FORMATS[:short_ordinal] = lambda { |date| date.strftime("%B #{date.day.ordinalize}") }
  def to_formatted_s(format = :default)
    if formatter = DATE_FORMATS[format]
      if formatter.respond_to?(:call)
        formatter.call(self).to_s
      else
        strftime(formatter)
      end
    else
      # Unknown format names fall back to the original Date#to_s.
      to_default_s
    end
  end
  # Order matters here: capture the original +to_s+ as +to_default_s+
  # before rebinding +to_s+ to the formatted version.
  alias_method :to_default_s, :to_s
  alias_method :to_s, :to_formatted_s

  # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005"
  def readable_inspect
    strftime("%a, %d %b %Y")
  end
  alias_method :default_inspect, :inspect
  alias_method :inspect, :readable_inspect

  # A method to keep Time, Date and DateTime instances interchangeable on conversions.
  # In this case, it simply returns +self+.
  def to_date
    self
  end if RUBY_VERSION < '1.9'

  # Converts a Date instance to a Time, where the time is set to the beginning of the day.
  # The timezone can be either :local or :utc (default :local).
  #
  # ==== Examples
  #   date = Date.new(2007, 11, 10)  # => Sat, 10 Nov 2007
  #
  #   date.to_time                   # => Sat Nov 10 00:00:00 0800 2007
  #   date.to_time(:local)           # => Sat Nov 10 00:00:00 0800 2007
  #
  #   date.to_time(:utc)             # => Sat Nov 10 00:00:00 UTC 2007
  def to_time(form = :local)
    # Dispatches to Time.local_time or Time.utc_time based on +form+.
    ::Time.send("#{form}_time", year, month, day)
  end

  # Converts a Date instance to a DateTime, where the time is set to the beginning of the day
  # and UTC offset is set to 0.
  #
  # ==== Examples
  #   date = Date.new(2007, 11, 10)  # => Sat, 10 Nov 2007
  #
  #   date.to_datetime               # => Sat, 10 Nov 2007 00:00:00 0000
  def to_datetime
    ::DateTime.civil(year, month, day, 0, 0, 0, 0)
  end if RUBY_VERSION < '1.9'

  # ISO 8601 date string ("%F" == "%Y-%m-%d"); only defined on pre-1.9
  # Rubies, which lack the built-in Date#iso8601.
  def iso8601
    strftime('%F')
  end if RUBY_VERSION < '1.9'

  alias_method :rfc3339, :iso8601 if RUBY_VERSION < '1.9'

  # XML Schema string via +to_time_in_current_zone+ (added by the
  # active_support/core_ext/date/zones require above).
  def xmlschema
    to_time_in_current_zone.xmlschema
  end
end
BibNumUMontreal/DMPonline_v4
vendor/ruby/2.1.0/gems/activesupport-3.2.22.4/lib/active_support/core_ext/date/conversions.rb
Ruby
agpl-3.0
3,620
import abc import collections from itertools import permutations import pickle from random import choice import sys from test import support import unittest from weakref import proxy import functools py_functools = support.import_fresh_module('functools', blocked=['_functools']) c_functools = support.import_fresh_module('functools', fresh=['_functools']) decimal = support.import_fresh_module('decimal', fresh=['_decimal']) def capture(*args, **kw): """capture all positional and keyword arguments""" return args, kw def signature(part): """ return the signature of a partial object """ return (part.func, part.args, part.keywords, part.__dict__) class TestPartial: def test_basic_examples(self): p = self.partial(capture, 1, 2, a=10, b=20) self.assertTrue(callable(p)) self.assertEqual(p(3, 4, b=30, c=40), ((1, 2, 3, 4), dict(a=10, b=30, c=40))) p = self.partial(map, lambda x: x*10) self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40]) def test_attributes(self): p = self.partial(capture, 1, 2, a=10, b=20) # attributes should be readable self.assertEqual(p.func, capture) self.assertEqual(p.args, (1, 2)) self.assertEqual(p.keywords, dict(a=10, b=20)) def test_argument_checking(self): self.assertRaises(TypeError, self.partial) # need at least a func arg try: self.partial(2)() except TypeError: pass else: self.fail('First arg not checked for callability') def test_protection_of_callers_dict_argument(self): # a caller's dictionary should not be altered by partial def func(a=10, b=20): return a d = {'a':3} p = self.partial(func, a=5) self.assertEqual(p(**d), 3) self.assertEqual(d, {'a':3}) p(b=7) self.assertEqual(d, {'a':3}) def test_arg_combinations(self): # exercise special code paths for zero args in either partial # object or the caller p = self.partial(capture) self.assertEqual(p(), ((), {})) self.assertEqual(p(1,2), ((1,2), {})) p = self.partial(capture, 1, 2) self.assertEqual(p(), ((1,2), {})) self.assertEqual(p(3,4), ((1,2,3,4), {})) def test_kw_combinations(self): # 
exercise special code paths for no keyword args in # either the partial object or the caller p = self.partial(capture) self.assertEqual(p(), ((), {})) self.assertEqual(p(a=1), ((), {'a':1})) p = self.partial(capture, a=1) self.assertEqual(p(), ((), {'a':1})) self.assertEqual(p(b=2), ((), {'a':1, 'b':2})) # keyword args in the call override those in the partial object self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2})) def test_positional(self): # make sure positional arguments are captured correctly for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]: p = self.partial(capture, *args) expected = args + ('x',) got, empty = p('x') self.assertTrue(expected == got and empty == {}) def test_keyword(self): # make sure keyword arguments are captured correctly for a in ['a', 0, None, 3.5]: p = self.partial(capture, a=a) expected = {'a':a,'x':None} empty, got = p(x=None) self.assertTrue(expected == got and empty == ()) def test_no_side_effects(self): # make sure there are no side effects that affect subsequent calls p = self.partial(capture, 0, a=1) args1, kw1 = p(1, b=2) self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2}) args2, kw2 = p() self.assertTrue(args2 == (0,) and kw2 == {'a':1}) def test_error_propagation(self): def f(x, y): x / y self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0)) self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0) self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0) self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1) def test_weakref(self): f = self.partial(int, base=16) p = proxy(f) self.assertEqual(f.func, p.func) f = None self.assertRaises(ReferenceError, getattr, p, 'func') def test_with_bound_and_unbound_methods(self): data = list(map(str, range(10))) join = self.partial(str.join, '') self.assertEqual(join(data), '0123456789') join = self.partial(''.join) self.assertEqual(join(data), '0123456789') @unittest.skipUnless(c_functools, 'requires the C _functools module') class TestPartialC(TestPartial, 
unittest.TestCase): if c_functools: partial = c_functools.partial def test_attributes_unwritable(self): # attributes should not be writable p = self.partial(capture, 1, 2, a=10, b=20) self.assertRaises(AttributeError, setattr, p, 'func', map) self.assertRaises(AttributeError, setattr, p, 'args', (1, 2)) self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2)) p = self.partial(hex) try: del p.__dict__ except TypeError: pass else: self.fail('partial object allowed __dict__ to be deleted') def test_repr(self): args = (object(), object()) args_repr = ', '.join(repr(a) for a in args) #kwargs = {'a': object(), 'b': object()} kwargs = {'a': object()} kwargs_repr = ', '.join("%s=%r" % (k, v) for k, v in kwargs.items()) if self.partial is c_functools.partial: name = 'functools.partial' else: name = self.partial.__name__ f = self.partial(capture) self.assertEqual('{}({!r})'.format(name, capture), repr(f)) f = self.partial(capture, *args) self.assertEqual('{}({!r}, {})'.format(name, capture, args_repr), repr(f)) f = self.partial(capture, **kwargs) self.assertEqual('{}({!r}, {})'.format(name, capture, kwargs_repr), repr(f)) f = self.partial(capture, *args, **kwargs) self.assertEqual('{}({!r}, {}, {})'.format(name, capture, args_repr, kwargs_repr), repr(f)) def test_pickle(self): f = self.partial(signature, 'asdf', bar=True) f.add_something_to__dict__ = True f_copy = pickle.loads(pickle.dumps(f)) self.assertEqual(signature(f), signature(f_copy)) # Issue 6083: Reference counting bug def test_setstate_refcount(self): class BadSequence: def __len__(self): return 4 def __getitem__(self, key): if key == 0: return max elif key == 1: return tuple(range(1000000)) elif key in (2, 3): return {} raise IndexError f = self.partial(object) self.assertRaisesRegex(SystemError, "new style getargs format but argument is not a tuple", f.__setstate__, BadSequence()) class TestPartialPy(TestPartial, unittest.TestCase): partial = staticmethod(py_functools.partial) if c_functools: 
class PartialSubclass(c_functools.partial): pass @unittest.skipUnless(c_functools, 'requires the C _functools module') class TestPartialCSubclass(TestPartialC): if c_functools: partial = PartialSubclass class TestPartialMethod(unittest.TestCase): class A(object): nothing = functools.partialmethod(capture) positional = functools.partialmethod(capture, 1) keywords = functools.partialmethod(capture, a=2) both = functools.partialmethod(capture, 3, b=4) nested = functools.partialmethod(positional, 5) over_partial = functools.partialmethod(functools.partial(capture, c=6), 7) static = functools.partialmethod(staticmethod(capture), 8) cls = functools.partialmethod(classmethod(capture), d=9) a = A() def test_arg_combinations(self): self.assertEqual(self.a.nothing(), ((self.a,), {})) self.assertEqual(self.a.nothing(5), ((self.a, 5), {})) self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6})) self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6})) self.assertEqual(self.a.positional(), ((self.a, 1), {})) self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {})) self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6})) self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6})) self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2})) self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2})) self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6})) self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6})) self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4})) self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4})) self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6})) self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6})) self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6})) def test_nested(self): self.assertEqual(self.a.nested(), ((self.a, 1, 5), {})) self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {})) 
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7})) self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7})) self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7})) def test_over_partial(self): self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6})) self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6})) self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8})) self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8})) self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8})) def test_bound_method_introspection(self): obj = self.a self.assertIs(obj.both.__self__, obj) self.assertIs(obj.nested.__self__, obj) self.assertIs(obj.over_partial.__self__, obj) self.assertIs(obj.cls.__self__, self.A) self.assertIs(self.A.cls.__self__, self.A) def test_unbound_method_retrieval(self): obj = self.A self.assertFalse(hasattr(obj.both, "__self__")) self.assertFalse(hasattr(obj.nested, "__self__")) self.assertFalse(hasattr(obj.over_partial, "__self__")) self.assertFalse(hasattr(obj.static, "__self__")) self.assertFalse(hasattr(self.a.static, "__self__")) def test_descriptors(self): for obj in [self.A, self.a]: with self.subTest(obj=obj): self.assertEqual(obj.static(), ((8,), {})) self.assertEqual(obj.static(5), ((8, 5), {})) self.assertEqual(obj.static(d=8), ((8,), {'d': 8})) self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8})) self.assertEqual(obj.cls(), ((self.A,), {'d': 9})) self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9})) self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9})) self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9})) def test_overriding_keywords(self): self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3})) self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3})) def test_invalid_args(self): with self.assertRaises(TypeError): class B(object): method = 
functools.partialmethod(None, 1) def test_repr(self): self.assertEqual(repr(vars(self.A)['both']), 'functools.partialmethod({}, 3, b=4)'.format(capture)) def test_abstract(self): class Abstract(abc.ABCMeta): @abc.abstractmethod def add(self, x, y): pass add5 = functools.partialmethod(add, 5) self.assertTrue(Abstract.add.__isabstractmethod__) self.assertTrue(Abstract.add5.__isabstractmethod__) for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]: self.assertFalse(getattr(func, '__isabstractmethod__', False)) class TestUpdateWrapper(unittest.TestCase): def check_wrapper(self, wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): # Check attributes were assigned for name in assigned: self.assertIs(getattr(wrapper, name), getattr(wrapped, name)) # Check attributes were updated for name in updated: wrapper_attr = getattr(wrapper, name) wrapped_attr = getattr(wrapped, name) for key in wrapped_attr: if name == "__dict__" and key == "__wrapped__": # __wrapped__ is overwritten by the update code continue self.assertIs(wrapped_attr[key], wrapper_attr[key]) # Check __wrapped__ self.assertIs(wrapper.__wrapped__, wrapped) def _default_update(self): def f(a:'This is a new annotation'): """This is a test""" pass f.attr = 'This is also a test' f.__wrapped__ = "This is a bald faced lie" def wrapper(b:'This is the prior annotation'): pass functools.update_wrapper(wrapper, f) return wrapper, f def test_default_update(self): wrapper, f = self._default_update() self.check_wrapper(wrapper, f) self.assertIs(wrapper.__wrapped__, f) self.assertEqual(wrapper.__name__, 'f') self.assertEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.attr, 'This is also a test') self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation') self.assertNotIn('b', wrapper.__annotations__) @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def 
test_default_update_doc(self): wrapper, f = self._default_update() self.assertEqual(wrapper.__doc__, 'This is a test') def test_no_update(self): def f(): """This is a test""" pass f.attr = 'This is also a test' def wrapper(): pass functools.update_wrapper(wrapper, f, (), ()) self.check_wrapper(wrapper, f, (), ()) self.assertEqual(wrapper.__name__, 'wrapper') self.assertNotEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.__doc__, None) self.assertEqual(wrapper.__annotations__, {}) self.assertFalse(hasattr(wrapper, 'attr')) def test_selective_update(self): def f(): pass f.attr = 'This is a different test' f.dict_attr = dict(a=1, b=2, c=3) def wrapper(): pass wrapper.dict_attr = {} assign = ('attr',) update = ('dict_attr',) functools.update_wrapper(wrapper, f, assign, update) self.check_wrapper(wrapper, f, assign, update) self.assertEqual(wrapper.__name__, 'wrapper') self.assertNotEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.__doc__, None) self.assertEqual(wrapper.attr, 'This is a different test') self.assertEqual(wrapper.dict_attr, f.dict_attr) def test_missing_attributes(self): def f(): pass def wrapper(): pass wrapper.dict_attr = {} assign = ('attr',) update = ('dict_attr',) # Missing attributes on wrapped object are ignored functools.update_wrapper(wrapper, f, assign, update) self.assertNotIn('attr', wrapper.__dict__) self.assertEqual(wrapper.dict_attr, {}) # Wrapper must have expected attributes for updating del wrapper.dict_attr with self.assertRaises(AttributeError): functools.update_wrapper(wrapper, f, assign, update) wrapper.dict_attr = 1 with self.assertRaises(AttributeError): functools.update_wrapper(wrapper, f, assign, update) @support.requires_docstrings @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def test_builtin_update(self): # Test for bug #1576241 def wrapper(): pass functools.update_wrapper(wrapper, max) self.assertEqual(wrapper.__name__, 'max') 
self.assertTrue(wrapper.__doc__.startswith('max(')) self.assertEqual(wrapper.__annotations__, {}) class TestWraps(TestUpdateWrapper): def _default_update(self): def f(): """This is a test""" pass f.attr = 'This is also a test' f.__wrapped__ = "This is still a bald faced lie" @functools.wraps(f) def wrapper(): pass return wrapper, f def test_default_update(self): wrapper, f = self._default_update() self.check_wrapper(wrapper, f) self.assertEqual(wrapper.__name__, 'f') self.assertEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.attr, 'This is also a test') @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def test_default_update_doc(self): wrapper, _ = self._default_update() self.assertEqual(wrapper.__doc__, 'This is a test') def test_no_update(self): def f(): """This is a test""" pass f.attr = 'This is also a test' @functools.wraps(f, (), ()) def wrapper(): pass self.check_wrapper(wrapper, f, (), ()) self.assertEqual(wrapper.__name__, 'wrapper') self.assertNotEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.__doc__, None) self.assertFalse(hasattr(wrapper, 'attr')) def test_selective_update(self): def f(): pass f.attr = 'This is a different test' f.dict_attr = dict(a=1, b=2, c=3) def add_dict_attr(f): f.dict_attr = {} return f assign = ('attr',) update = ('dict_attr',) @functools.wraps(f, assign, update) @add_dict_attr def wrapper(): pass self.check_wrapper(wrapper, f, assign, update) self.assertEqual(wrapper.__name__, 'wrapper') self.assertNotEqual(wrapper.__qualname__, f.__qualname__) self.assertEqual(wrapper.__doc__, None) self.assertEqual(wrapper.attr, 'This is a different test') self.assertEqual(wrapper.dict_attr, f.dict_attr) class TestReduce(unittest.TestCase): func = functools.reduce def test_reduce(self): class Squares: def __init__(self, max): self.max = max self.sofar = [] def __len__(self): return len(self.sofar) def __getitem__(self, i): if not 0 <= i < self.max: raise 
IndexError n = len(self.sofar) while n <= i: self.sofar.append(n*n) n += 1 return self.sofar[i] def add(x, y): return x + y self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc') self.assertEqual( self.func(add, [['a', 'c'], [], ['d', 'w']], []), ['a','c','d','w'] ) self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040) self.assertEqual( self.func(lambda x, y: x*y, range(2,21), 1), 2432902008176640000 ) self.assertEqual(self.func(add, Squares(10)), 285) self.assertEqual(self.func(add, Squares(10), 0), 285) self.assertEqual(self.func(add, Squares(0), 0), 0) self.assertRaises(TypeError, self.func) self.assertRaises(TypeError, self.func, 42, 42) self.assertRaises(TypeError, self.func, 42, 42, 42) self.assertEqual(self.func(42, "1"), "1") # func is never called with one item self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item self.assertRaises(TypeError, self.func, 42, (42, 42)) self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value self.assertRaises(TypeError, self.func, add, "") self.assertRaises(TypeError, self.func, add, ()) self.assertRaises(TypeError, self.func, add, object()) class TestFailingIter: def __iter__(self): raise RuntimeError self.assertRaises(RuntimeError, self.func, add, TestFailingIter()) self.assertEqual(self.func(add, [], None), None) self.assertEqual(self.func(add, [], 42), 42) class BadSeq: def __getitem__(self, index): raise ValueError self.assertRaises(ValueError, self.func, 42, BadSeq()) # Test reduce()'s use of iterators. 
def test_iterator_usage(self): class SequenceClass: def __init__(self, n): self.n = n def __getitem__(self, i): if 0 <= i < self.n: return i else: raise IndexError from operator import add self.assertEqual(self.func(add, SequenceClass(5)), 10) self.assertEqual(self.func(add, SequenceClass(5), 42), 52) self.assertRaises(TypeError, self.func, add, SequenceClass(0)) self.assertEqual(self.func(add, SequenceClass(0), 42), 42) self.assertEqual(self.func(add, SequenceClass(1)), 0) self.assertEqual(self.func(add, SequenceClass(1), 42), 42) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(self.func(add, d), "".join(d.keys())) class TestCmpToKey: def test_cmp_to_key(self): def cmp1(x, y): return (x > y) - (x < y) key = self.cmp_to_key(cmp1) self.assertEqual(key(3), key(3)) self.assertGreater(key(3), key(1)) self.assertGreaterEqual(key(3), key(3)) def cmp2(x, y): return int(x) - int(y) key = self.cmp_to_key(cmp2) self.assertEqual(key(4.0), key('4')) self.assertLess(key(2), key('35')) self.assertLessEqual(key(2), key('35')) self.assertNotEqual(key(2), key('35')) def test_cmp_to_key_arguments(self): def cmp1(x, y): return (x > y) - (x < y) key = self.cmp_to_key(mycmp=cmp1) self.assertEqual(key(obj=3), key(obj=3)) self.assertGreater(key(obj=3), key(obj=1)) with self.assertRaises((TypeError, AttributeError)): key(3) > 1 # rhs is not a K object with self.assertRaises((TypeError, AttributeError)): 1 < key(3) # lhs is not a K object with self.assertRaises(TypeError): key = self.cmp_to_key() # too few args with self.assertRaises(TypeError): key = self.cmp_to_key(cmp1, None) # too many args key = self.cmp_to_key(cmp1) with self.assertRaises(TypeError): key() # too few args with self.assertRaises(TypeError): key(None, None) # too many args def test_bad_cmp(self): def cmp1(x, y): raise ZeroDivisionError key = self.cmp_to_key(cmp1) with self.assertRaises(ZeroDivisionError): key(3) > key(1) class BadCmp: def __lt__(self, other): raise ZeroDivisionError def cmp1(x, y): return BadCmp() 
with self.assertRaises(ZeroDivisionError): key(3) > key(1) def test_obj_field(self): def cmp1(x, y): return (x > y) - (x < y) key = self.cmp_to_key(mycmp=cmp1) self.assertEqual(key(50).obj, 50) def test_sort_int(self): def mycmp(x, y): return y - x self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)), [4, 3, 2, 1, 0]) def test_sort_int_str(self): def mycmp(x, y): x, y = int(x), int(y) return (x > y) - (x < y) values = [5, '3', 7, 2, '0', '1', 4, '10', 1] values = sorted(values, key=self.cmp_to_key(mycmp)) self.assertEqual([int(value) for value in values], [0, 1, 1, 2, 3, 4, 5, 7, 10]) def test_hash(self): def mycmp(x, y): return y - x key = self.cmp_to_key(mycmp) k = key(10) self.assertRaises(TypeError, hash, k) self.assertNotIsInstance(k, collections.Hashable) @unittest.skipUnless(c_functools, 'requires the C _functools module') class TestCmpToKeyC(TestCmpToKey, unittest.TestCase): if c_functools: cmp_to_key = c_functools.cmp_to_key class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase): cmp_to_key = staticmethod(py_functools.cmp_to_key) class TestTotalOrdering(unittest.TestCase): def test_total_ordering_lt(self): @functools.total_ordering class A: def __init__(self, value): self.value = value def __lt__(self, other): return self.value < other.value def __eq__(self, other): return self.value == other.value self.assertTrue(A(1) < A(2)) self.assertTrue(A(2) > A(1)) self.assertTrue(A(1) <= A(2)) self.assertTrue(A(2) >= A(1)) self.assertTrue(A(2) <= A(2)) self.assertTrue(A(2) >= A(2)) self.assertFalse(A(1) > A(2)) def test_total_ordering_le(self): @functools.total_ordering class A: def __init__(self, value): self.value = value def __le__(self, other): return self.value <= other.value def __eq__(self, other): return self.value == other.value self.assertTrue(A(1) < A(2)) self.assertTrue(A(2) > A(1)) self.assertTrue(A(1) <= A(2)) self.assertTrue(A(2) >= A(1)) self.assertTrue(A(2) <= A(2)) self.assertTrue(A(2) >= A(2)) self.assertFalse(A(1) >= A(2)) def 
test_total_ordering_gt(self): @functools.total_ordering class A: def __init__(self, value): self.value = value def __gt__(self, other): return self.value > other.value def __eq__(self, other): return self.value == other.value self.assertTrue(A(1) < A(2)) self.assertTrue(A(2) > A(1)) self.assertTrue(A(1) <= A(2)) self.assertTrue(A(2) >= A(1)) self.assertTrue(A(2) <= A(2)) self.assertTrue(A(2) >= A(2)) self.assertFalse(A(2) < A(1)) def test_total_ordering_ge(self): @functools.total_ordering class A: def __init__(self, value): self.value = value def __ge__(self, other): return self.value >= other.value def __eq__(self, other): return self.value == other.value self.assertTrue(A(1) < A(2)) self.assertTrue(A(2) > A(1)) self.assertTrue(A(1) <= A(2)) self.assertTrue(A(2) >= A(1)) self.assertTrue(A(2) <= A(2)) self.assertTrue(A(2) >= A(2)) self.assertFalse(A(2) <= A(1)) def test_total_ordering_no_overwrite(self): # new methods should not overwrite existing @functools.total_ordering class A(int): pass self.assertTrue(A(1) < A(2)) self.assertTrue(A(2) > A(1)) self.assertTrue(A(1) <= A(2)) self.assertTrue(A(2) >= A(1)) self.assertTrue(A(2) <= A(2)) self.assertTrue(A(2) >= A(2)) def test_no_operations_defined(self): with self.assertRaises(ValueError): @functools.total_ordering class A: pass def test_type_error_when_not_implemented(self): # bug 10042; ensure stack overflow does not occur # when decorated types return NotImplemented @functools.total_ordering class ImplementsLessThan: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ImplementsLessThan): return self.value == other.value return False def __lt__(self, other): if isinstance(other, ImplementsLessThan): return self.value < other.value return NotImplemented @functools.total_ordering class ImplementsGreaterThan: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ImplementsGreaterThan): return self.value == other.value return False def 
__gt__(self, other): if isinstance(other, ImplementsGreaterThan): return self.value > other.value return NotImplemented @functools.total_ordering class ImplementsLessThanEqualTo: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ImplementsLessThanEqualTo): return self.value == other.value return False def __le__(self, other): if isinstance(other, ImplementsLessThanEqualTo): return self.value <= other.value return NotImplemented @functools.total_ordering class ImplementsGreaterThanEqualTo: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ImplementsGreaterThanEqualTo): return self.value == other.value return False def __ge__(self, other): if isinstance(other, ImplementsGreaterThanEqualTo): return self.value >= other.value return NotImplemented @functools.total_ordering class ComparatorNotImplemented: def __init__(self, value): self.value = value def __eq__(self, other): if isinstance(other, ComparatorNotImplemented): return self.value == other.value return False def __lt__(self, other): return NotImplemented with self.subTest("LT < 1"), self.assertRaises(TypeError): ImplementsLessThan(-1) < 1 with self.subTest("LT < LE"), self.assertRaises(TypeError): ImplementsLessThan(0) < ImplementsLessThanEqualTo(0) with self.subTest("LT < GT"), self.assertRaises(TypeError): ImplementsLessThan(1) < ImplementsGreaterThan(1) with self.subTest("LE <= LT"), self.assertRaises(TypeError): ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2) with self.subTest("LE <= GE"), self.assertRaises(TypeError): ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3) with self.subTest("GT > GE"), self.assertRaises(TypeError): ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4) with self.subTest("GT > LT"), self.assertRaises(TypeError): ImplementsGreaterThan(5) > ImplementsLessThan(5) with self.subTest("GE >= GT"), self.assertRaises(TypeError): ImplementsGreaterThanEqualTo(6) >= 
ImplementsGreaterThan(6) with self.subTest("GE >= LE"), self.assertRaises(TypeError): ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7) with self.subTest("GE when equal"): a = ComparatorNotImplemented(8) b = ComparatorNotImplemented(8) self.assertEqual(a, b) with self.assertRaises(TypeError): a >= b with self.subTest("LE when equal"): a = ComparatorNotImplemented(9) b = ComparatorNotImplemented(9) self.assertEqual(a, b) with self.assertRaises(TypeError): a <= b class TestLRU(unittest.TestCase): def test_lru(self): def orig(x, y): return 3 * x + y f = functools.lru_cache(maxsize=20)(orig) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(maxsize, 20) self.assertEqual(currsize, 0) self.assertEqual(hits, 0) self.assertEqual(misses, 0) domain = range(5) for i in range(1000): x, y = choice(domain), choice(domain) actual = f(x, y) expected = orig(x, y) self.assertEqual(actual, expected) hits, misses, maxsize, currsize = f.cache_info() self.assertTrue(hits > misses) self.assertEqual(hits + misses, 1000) self.assertEqual(currsize, 20) f.cache_clear() # test clearing hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 0) self.assertEqual(misses, 0) self.assertEqual(currsize, 0) f(x, y) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 0) self.assertEqual(misses, 1) self.assertEqual(currsize, 1) # Test bypassing the cache self.assertIs(f.__wrapped__, orig) f.__wrapped__(x, y) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 0) self.assertEqual(misses, 1) self.assertEqual(currsize, 1) # test size zero (which means "never-cache") @functools.lru_cache(0) def f(): nonlocal f_cnt f_cnt += 1 return 20 self.assertEqual(f.cache_info().maxsize, 0) f_cnt = 0 for i in range(5): self.assertEqual(f(), 20) self.assertEqual(f_cnt, 5) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 0) self.assertEqual(misses, 5) self.assertEqual(currsize, 0) # test size one 
@functools.lru_cache(1) def f(): nonlocal f_cnt f_cnt += 1 return 20 self.assertEqual(f.cache_info().maxsize, 1) f_cnt = 0 for i in range(5): self.assertEqual(f(), 20) self.assertEqual(f_cnt, 1) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 4) self.assertEqual(misses, 1) self.assertEqual(currsize, 1) # test size two @functools.lru_cache(2) def f(x): nonlocal f_cnt f_cnt += 1 return x*10 self.assertEqual(f.cache_info().maxsize, 2) f_cnt = 0 for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7: # * * * * self.assertEqual(f(x), x*10) self.assertEqual(f_cnt, 4) hits, misses, maxsize, currsize = f.cache_info() self.assertEqual(hits, 12) self.assertEqual(misses, 4) self.assertEqual(currsize, 2) def test_lru_with_maxsize_none(self): @functools.lru_cache(maxsize=None) def fib(n): if n < 2: return n return fib(n-1) + fib(n-2) self.assertEqual([fib(n) for n in range(16)], [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]) self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)) fib.cache_clear() self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0)) def test_lru_with_exceptions(self): # Verify that user_function exceptions get passed through without # creating a hard-to-read chained exception. 
# http://bugs.python.org/issue13177 for maxsize in (None, 128): @functools.lru_cache(maxsize) def func(i): return 'abc'[i] self.assertEqual(func(0), 'a') with self.assertRaises(IndexError) as cm: func(15) self.assertIsNone(cm.exception.__context__) # Verify that the previous exception did not result in a cached entry with self.assertRaises(IndexError): func(15) def test_lru_with_types(self): for maxsize in (None, 128): @functools.lru_cache(maxsize=maxsize, typed=True) def square(x): return x * x self.assertEqual(square(3), 9) self.assertEqual(type(square(3)), type(9)) self.assertEqual(square(3.0), 9.0) self.assertEqual(type(square(3.0)), type(9.0)) self.assertEqual(square(x=3), 9) self.assertEqual(type(square(x=3)), type(9)) self.assertEqual(square(x=3.0), 9.0) self.assertEqual(type(square(x=3.0)), type(9.0)) self.assertEqual(square.cache_info().hits, 4) self.assertEqual(square.cache_info().misses, 4) def test_lru_with_keyword_args(self): @functools.lru_cache() def fib(n): if n < 2: return n return fib(n=n-1) + fib(n=n-2) self.assertEqual( [fib(n=number) for number in range(16)], [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610] ) self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)) fib.cache_clear() self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0)) def test_lru_with_keyword_args_maxsize_none(self): @functools.lru_cache(maxsize=None) def fib(n): if n < 2: return n return fib(n=n-1) + fib(n=n-2) self.assertEqual([fib(n=number) for number in range(16)], [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]) self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)) fib.cache_clear() self.assertEqual(fib.cache_info(), functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0)) def test_need_for_rlock(self): # This will deadlock on an LRU cache that uses a regular lock @functools.lru_cache(maxsize=10) 
def test_func(x): 'Used to demonstrate a reentrant lru_cache call within a single thread' return x class DoubleEq: 'Demonstrate a reentrant lru_cache call within a single thread' def __init__(self, x): self.x = x def __hash__(self): return self.x def __eq__(self, other): if self.x == 2: test_func(DoubleEq(1)) return self.x == other.x test_func(DoubleEq(1)) # Load the cache test_func(DoubleEq(2)) # Load the cache self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call DoubleEq(2)) # Verify the correct return value def test_early_detection_of_bad_call(self): # Issue #22184 with self.assertRaises(TypeError): @functools.lru_cache def f(): pass class TestSingleDispatch(unittest.TestCase): def test_simple_overloads(self): @functools.singledispatch def g(obj): return "base" def g_int(i): return "integer" g.register(int, g_int) self.assertEqual(g("str"), "base") self.assertEqual(g(1), "integer") self.assertEqual(g([1,2,3]), "base") def test_mro(self): @functools.singledispatch def g(obj): return "base" class A: pass class C(A): pass class B(A): pass class D(C, B): pass def g_A(a): return "A" def g_B(b): return "B" g.register(A, g_A) g.register(B, g_B) self.assertEqual(g(A()), "A") self.assertEqual(g(B()), "B") self.assertEqual(g(C()), "A") self.assertEqual(g(D()), "B") def test_register_decorator(self): @functools.singledispatch def g(obj): return "base" @g.register(int) def g_int(i): return "int %s" % (i,) self.assertEqual(g(""), "base") self.assertEqual(g(12), "int 12") self.assertIs(g.dispatch(int), g_int) self.assertIs(g.dispatch(object), g.dispatch(str)) # Note: in the assert above this is not g. # @singledispatch returns the wrapper. 
def test_wrapping_attributes(self): @functools.singledispatch def g(obj): "Simple test" return "Test" self.assertEqual(g.__name__, "g") if sys.flags.optimize < 2: self.assertEqual(g.__doc__, "Simple test") @unittest.skipUnless(decimal, 'requires _decimal') @support.cpython_only def test_c_classes(self): @functools.singledispatch def g(obj): return "base" @g.register(decimal.DecimalException) def _(obj): return obj.args subn = decimal.Subnormal("Exponent < Emin") rnd = decimal.Rounded("Number got rounded") self.assertEqual(g(subn), ("Exponent < Emin",)) self.assertEqual(g(rnd), ("Number got rounded",)) @g.register(decimal.Subnormal) def _(obj): return "Too small to care." self.assertEqual(g(subn), "Too small to care.") self.assertEqual(g(rnd), ("Number got rounded",)) def test_compose_mro(self): # None of the examples in this test depend on haystack ordering. c = collections mro = functools._compose_mro bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set] for haystack in permutations(bases): m = mro(dict, haystack) self.assertEqual(m, [dict, c.MutableMapping, c.Mapping, c.Sized, c.Iterable, c.Container, object]) bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict] for haystack in permutations(bases): m = mro(c.ChainMap, haystack) self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping, c.Sized, c.Iterable, c.Container, object]) # If there's a generic function with implementations registered for # both Sized and Container, passing a defaultdict to it results in an # ambiguous dispatch which will cause a RuntimeError (see # test_mro_conflicts). bases = [c.Container, c.Sized, str] for haystack in permutations(bases): m = mro(c.defaultdict, [c.Sized, c.Container, str]) self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container, object]) # MutableSequence below is registered directly on D. In other words, it # preceeds MutableMapping which means single dispatch will always # choose MutableSequence here. 
class D(c.defaultdict): pass c.MutableSequence.register(D) bases = [c.MutableSequence, c.MutableMapping] for haystack in permutations(bases): m = mro(D, bases) self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.defaultdict, dict, c.MutableMapping, c.Mapping, c.Sized, c.Iterable, c.Container, object]) # Container and Callable are registered on different base classes and # a generic function supporting both should always pick the Callable # implementation if a C instance is passed. class C(c.defaultdict): def __call__(self): pass bases = [c.Sized, c.Callable, c.Container, c.Mapping] for haystack in permutations(bases): m = mro(C, haystack) self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping, c.Sized, c.Iterable, c.Container, object]) def test_register_abc(self): c = collections d = {"a": "b"} l = [1, 2, 3] s = {object(), None} f = frozenset(s) t = (1, 2, 3) @functools.singledispatch def g(obj): return "base" self.assertEqual(g(d), "base") self.assertEqual(g(l), "base") self.assertEqual(g(s), "base") self.assertEqual(g(f), "base") self.assertEqual(g(t), "base") g.register(c.Sized, lambda obj: "sized") self.assertEqual(g(d), "sized") self.assertEqual(g(l), "sized") self.assertEqual(g(s), "sized") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sized") g.register(c.MutableMapping, lambda obj: "mutablemapping") self.assertEqual(g(d), "mutablemapping") self.assertEqual(g(l), "sized") self.assertEqual(g(s), "sized") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sized") g.register(c.ChainMap, lambda obj: "chainmap") self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered self.assertEqual(g(l), "sized") self.assertEqual(g(s), "sized") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sized") g.register(c.MutableSequence, lambda obj: "mutablesequence") self.assertEqual(g(d), "mutablemapping") self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "sized") self.assertEqual(g(f), "sized") 
self.assertEqual(g(t), "sized") g.register(c.MutableSet, lambda obj: "mutableset") self.assertEqual(g(d), "mutablemapping") self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sized") g.register(c.Mapping, lambda obj: "mapping") self.assertEqual(g(d), "mutablemapping") # not specific enough self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sized") g.register(c.Sequence, lambda obj: "sequence") self.assertEqual(g(d), "mutablemapping") self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "sized") self.assertEqual(g(t), "sequence") g.register(c.Set, lambda obj: "set") self.assertEqual(g(d), "mutablemapping") self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "set") self.assertEqual(g(t), "sequence") g.register(dict, lambda obj: "dict") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "mutablesequence") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "set") self.assertEqual(g(t), "sequence") g.register(list, lambda obj: "list") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "list") self.assertEqual(g(s), "mutableset") self.assertEqual(g(f), "set") self.assertEqual(g(t), "sequence") g.register(set, lambda obj: "concrete-set") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "list") self.assertEqual(g(s), "concrete-set") self.assertEqual(g(f), "set") self.assertEqual(g(t), "sequence") g.register(frozenset, lambda obj: "frozen-set") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "list") self.assertEqual(g(s), "concrete-set") self.assertEqual(g(f), "frozen-set") self.assertEqual(g(t), "sequence") g.register(tuple, lambda obj: "tuple") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "list") self.assertEqual(g(s), "concrete-set") self.assertEqual(g(f), "frozen-set") 
self.assertEqual(g(t), "tuple") def test_c3_abc(self): c = collections mro = functools._c3_mro class A(object): pass class B(A): def __len__(self): return 0 # implies Sized @c.Container.register class C(object): pass class D(object): pass # unrelated class X(D, C, B): def __call__(self): pass # implies Callable expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object] for abcs in permutations([c.Sized, c.Callable, c.Container]): self.assertEqual(mro(X, abcs=abcs), expected) # unrelated ABCs don't appear in the resulting MRO many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable] self.assertEqual(mro(X, abcs=many_abcs), expected) def test_mro_conflicts(self): c = collections @functools.singledispatch def g(arg): return "base" class O(c.Sized): def __len__(self): return 0 o = O() self.assertEqual(g(o), "base") g.register(c.Iterable, lambda arg: "iterable") g.register(c.Container, lambda arg: "container") g.register(c.Sized, lambda arg: "sized") g.register(c.Set, lambda arg: "set") self.assertEqual(g(o), "sized") c.Iterable.register(O) self.assertEqual(g(o), "sized") # because it's explicitly in __mro__ c.Container.register(O) self.assertEqual(g(o), "sized") # see above: Sized is in __mro__ c.Set.register(O) self.assertEqual(g(o), "set") # because c.Set is a subclass of # c.Sized and c.Container class P: pass p = P() self.assertEqual(g(p), "base") c.Iterable.register(P) self.assertEqual(g(p), "iterable") c.Container.register(P) with self.assertRaises(RuntimeError) as re_one: g(p) self.assertIn( str(re_one.exception), (("Ambiguous dispatch: <class 'collections.abc.Container'> " "or <class 'collections.abc.Iterable'>"), ("Ambiguous dispatch: <class 'collections.abc.Iterable'> " "or <class 'collections.abc.Container'>")), ) class Q(c.Sized): def __len__(self): return 0 q = Q() self.assertEqual(g(q), "sized") c.Iterable.register(Q) self.assertEqual(g(q), "sized") # because it's explicitly in __mro__ c.Set.register(Q) self.assertEqual(g(q), "set") 
# because c.Set is a subclass of # c.Sized and c.Iterable @functools.singledispatch def h(arg): return "base" @h.register(c.Sized) def _(arg): return "sized" @h.register(c.Container) def _(arg): return "container" # Even though Sized and Container are explicit bases of MutableMapping, # this ABC is implicitly registered on defaultdict which makes all of # MutableMapping's bases implicit as well from defaultdict's # perspective. with self.assertRaises(RuntimeError) as re_two: h(c.defaultdict(lambda: 0)) self.assertIn( str(re_two.exception), (("Ambiguous dispatch: <class 'collections.abc.Container'> " "or <class 'collections.abc.Sized'>"), ("Ambiguous dispatch: <class 'collections.abc.Sized'> " "or <class 'collections.abc.Container'>")), ) class R(c.defaultdict): pass c.MutableSequence.register(R) @functools.singledispatch def i(arg): return "base" @i.register(c.MutableMapping) def _(arg): return "mapping" @i.register(c.MutableSequence) def _(arg): return "sequence" r = R() self.assertEqual(i(r), "sequence") class S: pass class T(S, c.Sized): def __len__(self): return 0 t = T() self.assertEqual(h(t), "sized") c.Container.register(T) self.assertEqual(h(t), "sized") # because it's explicitly in the MRO class U: def __len__(self): return 0 u = U() self.assertEqual(h(u), "sized") # implicit Sized subclass inferred # from the existence of __len__() c.Container.register(U) # There is no preference for registered versus inferred ABCs. 
with self.assertRaises(RuntimeError) as re_three: h(u) self.assertIn( str(re_three.exception), (("Ambiguous dispatch: <class 'collections.abc.Container'> " "or <class 'collections.abc.Sized'>"), ("Ambiguous dispatch: <class 'collections.abc.Sized'> " "or <class 'collections.abc.Container'>")), ) class V(c.Sized, S): def __len__(self): return 0 @functools.singledispatch def j(arg): return "base" @j.register(S) def _(arg): return "s" @j.register(c.Container) def _(arg): return "container" v = V() self.assertEqual(j(v), "s") c.Container.register(V) self.assertEqual(j(v), "container") # because it ends up right after # Sized in the MRO def test_cache_invalidation(self): from collections import UserDict class TracingDict(UserDict): def __init__(self, *args, **kwargs): super(TracingDict, self).__init__(*args, **kwargs) self.set_ops = [] self.get_ops = [] def __getitem__(self, key): result = self.data[key] self.get_ops.append(key) return result def __setitem__(self, key, value): self.set_ops.append(key) self.data[key] = value def clear(self): self.data.clear() _orig_wkd = functools.WeakKeyDictionary td = TracingDict() functools.WeakKeyDictionary = lambda: td c = collections @functools.singledispatch def g(arg): return "base" d = {} l = [] self.assertEqual(len(td), 0) self.assertEqual(g(d), "base") self.assertEqual(len(td), 1) self.assertEqual(td.get_ops, []) self.assertEqual(td.set_ops, [dict]) self.assertEqual(td.data[dict], g.registry[object]) self.assertEqual(g(l), "base") self.assertEqual(len(td), 2) self.assertEqual(td.get_ops, []) self.assertEqual(td.set_ops, [dict, list]) self.assertEqual(td.data[dict], g.registry[object]) self.assertEqual(td.data[list], g.registry[object]) self.assertEqual(td.data[dict], td.data[list]) self.assertEqual(g(l), "base") self.assertEqual(g(d), "base") self.assertEqual(td.get_ops, [list, dict]) self.assertEqual(td.set_ops, [dict, list]) g.register(list, lambda arg: "list") self.assertEqual(td.get_ops, [list, dict]) 
self.assertEqual(len(td), 0) self.assertEqual(g(d), "base") self.assertEqual(len(td), 1) self.assertEqual(td.get_ops, [list, dict]) self.assertEqual(td.set_ops, [dict, list, dict]) self.assertEqual(td.data[dict], functools._find_impl(dict, g.registry)) self.assertEqual(g(l), "list") self.assertEqual(len(td), 2) self.assertEqual(td.get_ops, [list, dict]) self.assertEqual(td.set_ops, [dict, list, dict, list]) self.assertEqual(td.data[list], functools._find_impl(list, g.registry)) class X: pass c.MutableMapping.register(X) # Will not invalidate the cache, # not using ABCs yet. self.assertEqual(g(d), "base") self.assertEqual(g(l), "list") self.assertEqual(td.get_ops, [list, dict, dict, list]) self.assertEqual(td.set_ops, [dict, list, dict, list]) g.register(c.Sized, lambda arg: "sized") self.assertEqual(len(td), 0) self.assertEqual(g(d), "sized") self.assertEqual(len(td), 1) self.assertEqual(td.get_ops, [list, dict, dict, list]) self.assertEqual(td.set_ops, [dict, list, dict, list, dict]) self.assertEqual(g(l), "list") self.assertEqual(len(td), 2) self.assertEqual(td.get_ops, [list, dict, dict, list]) self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) self.assertEqual(g(l), "list") self.assertEqual(g(d), "sized") self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict]) self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) g.dispatch(list) g.dispatch(dict) self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict, list, dict]) self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) c.MutableSet.register(X) # Will invalidate the cache. self.assertEqual(len(td), 2) # Stale cache. 
self.assertEqual(g(l), "list") self.assertEqual(len(td), 1) g.register(c.MutableMapping, lambda arg: "mutablemapping") self.assertEqual(len(td), 0) self.assertEqual(g(d), "mutablemapping") self.assertEqual(len(td), 1) self.assertEqual(g(l), "list") self.assertEqual(len(td), 2) g.register(dict, lambda arg: "dict") self.assertEqual(g(d), "dict") self.assertEqual(g(l), "list") g._clear_cache() self.assertEqual(len(td), 0) functools.WeakKeyDictionary = _orig_wkd def test_main(verbose=None): test_classes = ( TestPartialC, TestPartialPy, TestPartialCSubclass, TestPartialMethod, TestUpdateWrapper, TestTotalOrdering, TestCmpToKeyC, TestCmpToKeyPy, TestWraps, TestReduce, TestLRU, TestSingleDispatch, ) support.run_unittest(*test_classes) # verify reference counting if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in range(len(counts)): support.run_unittest(*test_classes) gc.collect() counts[i] = sys.gettotalrefcount() print(counts) if __name__ == '__main__': test_main(verbose=True)
jiangzhuo/kbengine
kbe/src/lib/python/Lib/test/test_functools.py
Python
lgpl-3.0
60,228
package B; import A.C; public class M { int y = new C().get(); }
asedunov/intellij-community
java/java-tests/testData/compileServer/incremental/fieldProperties/nonIncremental3/src/B/M.java
Java
apache-2.0
71
import datetime import uuid from django.db import models class Author(models.Model): name = models.CharField(max_length=100) class Meta: ordering = ('name',) def __str__(self): return self.name class BetterAuthor(Author): write_speed = models.IntegerField() class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=100) class Meta: unique_together = ( ('author', 'title'), ) ordering = ['id'] def __str__(self): return self.title def clean(self): # Ensure author is always accessible in clean method assert self.author.name is not None class BookWithCustomPK(models.Model): my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True) author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=100) def __str__(self): return '%s: %s' % (self.my_pk, self.title) class Editor(models.Model): name = models.CharField(max_length=100) class BookWithOptionalAltEditor(models.Model): author = models.ForeignKey(Author, models.CASCADE) # Optional secondary author alt_editor = models.ForeignKey(Editor, models.SET_NULL, blank=True, null=True) title = models.CharField(max_length=100) class Meta: unique_together = ( ('author', 'title', 'alt_editor'), ) def __str__(self): return self.title class AlternateBook(Book): notes = models.CharField(max_length=100) def __str__(self): return '%s - %s' % (self.title, self.notes) class AuthorMeeting(models.Model): name = models.CharField(max_length=100) authors = models.ManyToManyField(Author) created = models.DateField(editable=False) def __str__(self): return self.name class CustomPrimaryKey(models.Model): my_pk = models.CharField(max_length=10, primary_key=True) some_field = models.CharField(max_length=100) # models for inheritance tests. 
class Place(models.Model): name = models.CharField(max_length=50) city = models.CharField(max_length=50) def __str__(self): return self.name class Owner(models.Model): auto_id = models.AutoField(primary_key=True) name = models.CharField(max_length=100) place = models.ForeignKey(Place, models.CASCADE) def __str__(self): return "%s at %s" % (self.name, self.place) class Location(models.Model): place = models.ForeignKey(Place, models.CASCADE, unique=True) # this is purely for testing the data doesn't matter here :) lat = models.CharField(max_length=100) lon = models.CharField(max_length=100) class OwnerProfile(models.Model): owner = models.OneToOneField(Owner, models.CASCADE, primary_key=True) age = models.PositiveIntegerField() def __str__(self): return "%s is %d" % (self.owner.name, self.age) class Restaurant(Place): serves_pizza = models.BooleanField(default=False) class Product(models.Model): slug = models.SlugField(unique=True) def __str__(self): return self.slug class Price(models.Model): price = models.DecimalField(max_digits=10, decimal_places=2) quantity = models.PositiveIntegerField() class Meta: unique_together = (('price', 'quantity'),) def __str__(self): return "%s for %s" % (self.quantity, self.price) class MexicanRestaurant(Restaurant): serves_tacos = models.BooleanField(default=False) class ClassyMexicanRestaurant(MexicanRestaurant): the_restaurant = models.OneToOneField(MexicanRestaurant, models.CASCADE, parent_link=True, primary_key=True) tacos_are_yummy = models.BooleanField(default=False) # models for testing unique_together validation when a fk is involved and # using inlineformset_factory. 
class Repository(models.Model): name = models.CharField(max_length=25) def __str__(self): return self.name class Revision(models.Model): repository = models.ForeignKey(Repository, models.CASCADE) revision = models.CharField(max_length=40) class Meta: unique_together = (("repository", "revision"),) def __str__(self): return "%s (%s)" % (self.revision, str(self.repository)) # models for testing callable defaults (see bug #7975). If you define a model # with a callable default value, you cannot rely on the initial value in a # form. class Person(models.Model): name = models.CharField(max_length=128) class Membership(models.Model): person = models.ForeignKey(Person, models.CASCADE) date_joined = models.DateTimeField(default=datetime.datetime.now) karma = models.IntegerField() # models for testing a null=True fk to a parent class Team(models.Model): name = models.CharField(max_length=100) class Player(models.Model): team = models.ForeignKey(Team, models.SET_NULL, null=True) name = models.CharField(max_length=100) def __str__(self): return self.name # Models for testing custom ModelForm save methods in formsets and inline formsets class Poet(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Poem(models.Model): poet = models.ForeignKey(Poet, models.CASCADE) name = models.CharField(max_length=100) def __str__(self): return self.name class Post(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateField() def __str__(self): return self.title # Models for testing UUID primary keys class UUIDPKParent(models.Model): uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=255) class UUIDPKChild(models.Model): uuid = models.UUIDField(primary_key=True, 
default=uuid.uuid4, editable=False) name = models.CharField(max_length=255) parent = models.ForeignKey(UUIDPKParent, models.CASCADE) class ChildWithEditablePK(models.Model): name = models.CharField(max_length=255, primary_key=True) parent = models.ForeignKey(UUIDPKParent, models.CASCADE) class AutoPKChildOfUUIDPKParent(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey(UUIDPKParent, models.CASCADE) class AutoPKParent(models.Model): name = models.CharField(max_length=255) class UUIDPKChildOfAutoPKParent(models.Model): uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=255) parent = models.ForeignKey(AutoPKParent, models.CASCADE) class ParentWithUUIDAlternateKey(models.Model): uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=50) class ChildRelatedViaAK(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey(ParentWithUUIDAlternateKey, models.CASCADE, to_field='uuid')
elena/django
tests/model_formsets/models.py
Python
bsd-3-clause
7,170
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /* * * COM+99 EE to Debugger Interface Implementation * */ #include "common.h" #include "dbginterface.h" #include "eedbginterfaceimpl.h" #include "virtualcallstub.h" #include "contractimpl.h" #ifdef DEBUGGING_SUPPORTED #ifndef DACCESS_COMPILE // // Cleanup any global data used by this interface. // void EEDbgInterfaceImpl::Terminate(void) { CONTRACTL { SO_INTOLERANT; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; if (g_pEEDbgInterfaceImpl) { delete g_pEEDbgInterfaceImpl; g_pEEDbgInterfaceImpl = NULL; } } #endif // #ifndef DACCESS_COMPILE Thread* EEDbgInterfaceImpl::GetThread(void) { LIMITED_METHOD_CONTRACT; // Since this may be called from a Debugger Interop Hijack, the EEThread may be bogus. // Thus we can't use contracts. If we do fix that, then the contract below would be nice... #if 0 CONTRACT(Thread *) { NOTHROW; GC_NOTRIGGER; POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; #endif return ::GetThread(); } #ifndef DACCESS_COMPILE void EEDbgInterfaceImpl::SetEEThreadPtr(VOID* newPtr) { // Since this may be called from a Debugger Interop Hijack, the EEThread may be bogus. // Thus we can't use contracts. If we do fix that, then the contract below would be nice... #if 0 CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(GetThread() == NULL); // shouldn't have an EE thread. } CONTRACTL_END; #endif // This should only be called by interop-debugging when we don't have an EE thread // object. // Normally the LS & RS can communicate a pointer value using the EE thread's // m_debuggerWord field. If we have no EE thread, then we can use the // TLS slot that the EE thread would have been in. 
SetThread((Thread*)newPtr); } StackWalkAction EEDbgInterfaceImpl::StackWalkFramesEx(Thread* pThread, PREGDISPLAY pRD, PSTACKWALKFRAMESCALLBACK pCallback, VOID* pData, unsigned int flags) { CONTRACTL { DISABLED(NOTHROW); // FIX THIS when StackWalkFramesEx gets fixed. DISABLED(GC_TRIGGERS); // We cannot predict if pCallback will trigger or not. // Disabled is not a bug in this case. PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; return pThread->StackWalkFramesEx(pRD, pCallback, pData, flags); } Frame *EEDbgInterfaceImpl::GetFrame(CrawlFrame *pCF) { CONTRACT(Frame *) { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pCF)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; RETURN pCF->GetFrame(); } bool EEDbgInterfaceImpl::InitRegDisplay(Thread* pThread, const PREGDISPLAY pRD, const PCONTEXT pctx, bool validContext) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pThread)); PRECONDITION(CheckPointer(pRD)); if (validContext) { PRECONDITION(CheckPointer(pctx)); } } CONTRACTL_END; return pThread->InitRegDisplay(pRD, pctx, validContext); } BOOL EEDbgInterfaceImpl::IsStringObject(Object* o) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_NOTRIGGER; PRECONDITION(CheckPointer(o)); } CONTRACTL_END; return o->GetMethodTable() == g_pStringClass; } BOOL EEDbgInterfaceImpl::IsTypedReference(MethodTable* pMT) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_NOTRIGGER; PRECONDITION(CheckPointer(pMT)); } CONTRACTL_END; return pMT == g_TypedReferenceMT; } WCHAR* EEDbgInterfaceImpl::StringObjectGetBuffer(StringObject* so) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(so)); } CONTRACTL_END; return so->GetBuffer(); } DWORD EEDbgInterfaceImpl::StringObjectGetStringLength(StringObject* so) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(so)); } CONTRACTL_END; return so->GetStringLength(); } void* EEDbgInterfaceImpl::GetObjectFromHandle(OBJECTHANDLE handle) { CONTRACTL { 
SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; void *v; *((OBJECTREF *)&v) = *(OBJECTREF *)handle; return v; } OBJECTHANDLE EEDbgInterfaceImpl::GetHandleFromObject(void *obj, bool fStrongNewRef, AppDomain *pAppDomain) { CONTRACTL { SO_NOT_MAINLINE; THROWS; // From CreateHandle GC_NOTRIGGER; PRECONDITION(CheckPointer(pAppDomain)); } CONTRACTL_END; OBJECTHANDLE oh; if (fStrongNewRef) { oh = pAppDomain->CreateStrongHandle(ObjectToOBJECTREF((Object *)obj)); LOG((LF_CORDB, LL_INFO1000, "EEI::GHFO: Given objectref 0x%x," "created strong handle 0x%x!\n", obj, oh)); } else { oh = pAppDomain->CreateLongWeakHandle( ObjectToOBJECTREF((Object *)obj)); LOG((LF_CORDB, LL_INFO1000, "EEI::GHFO: Given objectref 0x%x," "created long weak handle 0x%x!\n", obj, oh)); } return oh; } void EEDbgInterfaceImpl::DbgDestroyHandle(OBJECTHANDLE oh, bool fStrongNewRef) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; LOG((LF_CORDB, LL_INFO1000, "EEI::GHFO: Destroyed given handle 0x%x," "fStrong: 0x%x!\n", oh, fStrongNewRef)); if (fStrongNewRef) { DestroyStrongHandle(oh); } else { DestroyLongWeakHandle(oh); } } OBJECTHANDLE EEDbgInterfaceImpl::GetThreadException(Thread *pThread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; OBJECTHANDLE oh = pThread->GetThrowableAsHandle(); if (oh != NULL) { return oh; } // Return the last thrown object if there's no current throwable. // This logic is similar to UpdateCurrentThrowable(). return pThread->m_LastThrownObjectHandle; } bool EEDbgInterfaceImpl::IsThreadExceptionNull(Thread *pThread) { CONTRACTL { SO_TOLERANT; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; // // We're assuming that the handle on the // thread is a strong handle and we're goona check it for // NULL. We're also assuming something about the // implementation of the handle here, too. 
// OBJECTHANDLE h = pThread->GetThrowableAsHandle(); if (h == NULL) { return true; } void *pThrowable = *((void**)h); return (pThrowable == NULL); } void EEDbgInterfaceImpl::ClearThreadException(Thread *pThread) { // // If one day there is a continuable exception, then this will have to be // implemented properly. // // LIMITED_METHOD_CONTRACT; } bool EEDbgInterfaceImpl::StartSuspendForDebug(AppDomain *pAppDomain, BOOL fHoldingThreadStoreLock) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; LOG((LF_CORDB,LL_INFO1000, "EEDbgII:SSFD: start suspend on AD:0x%x\n", pAppDomain)); bool result = Thread::SysStartSuspendForDebug(pAppDomain); return result; } bool EEDbgInterfaceImpl::SweepThreadsForDebug(bool forceSync) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; DISABLED(GC_TRIGGERS); // Called by unmanaged threads. } CONTRACTL_END; return Thread::SysSweepThreadsForDebug(forceSync); } void EEDbgInterfaceImpl::ResumeFromDebug(AppDomain *pAppDomain) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread::SysResumeFromDebug(pAppDomain); } void EEDbgInterfaceImpl::MarkThreadForDebugSuspend(Thread* pRuntimeThread) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pRuntimeThread)); } CONTRACTL_END; pRuntimeThread->MarkForDebugSuspend(); } void EEDbgInterfaceImpl::MarkThreadForDebugStepping(Thread* pRuntimeThread, bool onOff) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pRuntimeThread)); } CONTRACTL_END; pRuntimeThread->MarkDebuggerIsStepping(onOff); } void EEDbgInterfaceImpl::SetThreadFilterContext(Thread *thread, CONTEXT *context) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); } CONTRACTL_END; thread->SetFilterContext(context); } CONTEXT *EEDbgInterfaceImpl::GetThreadFilterContext(Thread *thread) { CONTRACT(CONTEXT *) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } 
CONTRACT_END; RETURN thread->GetFilterContext(); } VOID * EEDbgInterfaceImpl::GetThreadDebuggerWord(Thread *thread) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); } CONTRACTL_END; return thread->m_debuggerWord; } void EEDbgInterfaceImpl::SetThreadDebuggerWord(Thread *thread, VOID *dw) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); } CONTRACTL_END; thread->m_debuggerWord = dw; } BOOL EEDbgInterfaceImpl::IsManagedNativeCode(const BYTE *address) { WRAPPER_NO_CONTRACT; return ExecutionManager::IsManagedCode((PCODE)address); } MethodDesc *EEDbgInterfaceImpl::GetNativeCodeMethodDesc(const PCODE address) { CONTRACT(MethodDesc *) { NOTHROW; GC_NOTRIGGER; PRECONDITION(address != NULL); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; RETURN ExecutionManager::GetCodeMethodDesc(address); } // IsInPrologOrEpilog doesn't seem to be used for code that uses GC_INFO_DECODER BOOL EEDbgInterfaceImpl::IsInPrologOrEpilog(const BYTE *address, size_t* prologSize) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; *prologSize = 0; EECodeInfo codeInfo((PCODE)address); if (codeInfo.IsValid()) { LPVOID methodInfo = codeInfo.GetGCInfo(); if (codeInfo.GetCodeManager()->IsInPrologOrEpilog(codeInfo.GetRelOffset(), methodInfo, prologSize)) { return TRUE; } } return FALSE; } // // Given a collection of native offsets of a certain function, determine if each falls // within an exception filter or handler. 
// void EEDbgInterfaceImpl::DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress, DebugOffsetToHandlerInfo *pOffsetToHandlerInfo, unsigned offsetToHandlerInfoLength) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; EECodeInfo codeInfo((PCODE)functionAddress); if (!codeInfo.IsValid()) { return; } // Loop through all the exception handling clause information for the method EH_CLAUSE_ENUMERATOR pEnumState; unsigned EHCount = codeInfo.GetJitManager()->InitializeEHEnumeration(codeInfo.GetMethodToken(), &pEnumState); if (EHCount == 0) { return; } for (ULONG i=0; i < EHCount; i++) { EE_ILEXCEPTION_CLAUSE EHClause; codeInfo.GetJitManager()->GetNextEHClause(&pEnumState, &EHClause); // Check each EH clause against each offset of interest. // Note that this could be time consuming for very long methods ( O(n^2) ). // We could make this linear if we could guarentee that the two lists are sorted. for (ULONG j=0; j < offsetToHandlerInfoLength; j++) { SIZE_T offs = pOffsetToHandlerInfo[j].offset; // those with -1 indicate slots to skip if (offs == (SIZE_T) -1) { continue; } // For a filter, the handler comes directly after it so check from start of filter // to end of handler if (IsFilterHandler(&EHClause)) { if (offs >= EHClause.FilterOffset && offs < EHClause.HandlerEndPC) { pOffsetToHandlerInfo[j].isInFilterOrHandler = TRUE; } } // For anything else, only care about handler range else if (offs >= EHClause.HandlerStartPC && offs < EHClause.HandlerEndPC) { pOffsetToHandlerInfo[j].isInFilterOrHandler = TRUE; } } } } #endif // #ifndef DACCESS_COMPILE void EEDbgInterfaceImpl::GetMethodRegionInfo(const PCODE pStart, PCODE * pCold, size_t *hotSize, size_t *coldSize) { CONTRACTL { SO_INTOLERANT; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pCold)); PRECONDITION(CheckPointer(hotSize)); PRECONDITION(CheckPointer(coldSize)); SUPPORTS_DAC; } CONTRACTL_END; IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0}; EECodeInfo codeInfo(pStart); 
if (codeInfo.IsValid() != NULL) { codeInfo.GetMethodRegionInfo(&methodRegionInfo); } *pCold = methodRegionInfo.coldStartAddress; *hotSize = methodRegionInfo.hotSize; *coldSize = methodRegionInfo.coldSize; } #if defined(WIN64EXCEPTIONS) DWORD EEDbgInterfaceImpl::GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pStart)); } CONTRACTL_END; EECodeInfo codeInfo((PCODE)pStart); _ASSERTE(codeInfo.IsValid()); return codeInfo.GetJitManager()->GetFuncletStartOffsets(codeInfo.GetMethodToken(), pStartOffsets, dwLength); } StackFrame EEDbgInterfaceImpl::FindParentStackFrame(CrawlFrame* pCF) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pCF)); } CONTRACTL_END; #if defined(DACCESS_COMPILE) DacNotImpl(); return StackFrame(); #else // !DACCESS_COMPILE return ExceptionTracker::FindParentStackFrameForStackWalk(pCF); #endif // !DACCESS_COMPILE } #endif // WIN64EXCEPTIONS #ifndef DACCESS_COMPILE size_t EEDbgInterfaceImpl::GetFunctionSize(MethodDesc *pFD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); } CONTRACTL_END; PCODE methodStart = pFD->GetNativeCode(); if (methodStart == NULL) return 0; EECodeInfo codeInfo(methodStart); PTR_VOID methodInfo = codeInfo.GetGCInfo(); return codeInfo.GetCodeManager()->GetFunctionSize(methodInfo); } #endif //!DACCESS_COMPILE const PCODE EEDbgInterfaceImpl::GetFunctionAddress(MethodDesc *pFD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); SUPPORTS_DAC; } CONTRACTL_END; return pFD->GetNativeCode(); } #ifndef DACCESS_COMPILE void EEDbgInterfaceImpl::DisablePreemptiveGC(void) { CONTRACTL { SO_INTOLERANT; NOTHROW; DISABLED(GC_TRIGGERS); // Disabled because disabled in RareDisablePreemptiveGC() } CONTRACTL_END; ::GetThread()->DisablePreemptiveGC(); } void EEDbgInterfaceImpl::EnablePreemptiveGC(void) { CONTRACTL { SO_INTOLERANT; NOTHROW; DISABLED(GC_TRIGGERS); // Disabled because disabled in 
RareEnablePreemptiveGC() } CONTRACTL_END; ::GetThread()->EnablePreemptiveGC(); } bool EEDbgInterfaceImpl::IsPreemptiveGCDisabled(void) { CONTRACTL { SO_TOLERANT; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return ::GetThread()->PreemptiveGCDisabled() != 0; } DWORD EEDbgInterfaceImpl::MethodDescIsStatic(MethodDesc *pFD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); } CONTRACTL_END; return pFD->IsStatic(); } #endif // #ifndef DACCESS_COMPILE Module *EEDbgInterfaceImpl::MethodDescGetModule(MethodDesc *pFD) { CONTRACT(Module *) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; RETURN pFD->GetModule(); } #ifndef DACCESS_COMPILE COR_ILMETHOD* EEDbgInterfaceImpl::MethodDescGetILHeader(MethodDesc *pFD) { CONTRACT(COR_ILMETHOD *) { THROWS; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; if (pFD->IsIL()) { RETURN pFD->GetILHeader(); } RETURN NULL; } ULONG EEDbgInterfaceImpl::MethodDescGetRVA(MethodDesc *pFD) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pFD)); } CONTRACTL_END; return pFD->GetRVA(); } MethodDesc *EEDbgInterfaceImpl::FindLoadedMethodRefOrDef(Module* pModule, mdToken memberRef) { CONTRACT(MethodDesc *) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); } CONTRACT_END; // Must have a MemberRef or a MethodDef mdToken tkType = TypeFromToken(memberRef); _ASSERTE((tkType == mdtMemberRef) || (tkType == mdtMethodDef)); if (tkType == mdtMemberRef) { RETURN pModule->LookupMemberRefAsMethod(memberRef); } RETURN pModule->LookupMethodDef(memberRef); } MethodDesc *EEDbgInterfaceImpl::LoadMethodDef(Module* pModule, mdMethodDef methodDef, DWORD numGenericArgs, TypeHandle *pGenericArgs, TypeHandle *pOwnerType) { CONTRACT(MethodDesc *) { THROWS; GC_TRIGGERS; PRECONDITION(CheckPointer(pModule)); POSTCONDITION(CheckPointer(RETVAL)); } 
CONTRACT_END; _ASSERTE(TypeFromToken(methodDef) == mdtMethodDef); // The generic class and method args are sent as one array // by the debugger. We now split this into two by finding out how // many generic args are for the class and how many for the // method. The actual final checks are done in MemberLoader::GetMethodDescFromMethodDef. DWORD numGenericClassArgs = 0; TypeHandle *pGenericClassArgs = NULL; DWORD nGenericMethodArgs = 0; TypeHandle *pGenericMethodArgs = NULL; mdTypeDef typeDef = 0; TypeHandle thOwner; BOOL forceRemotable = FALSE; if (numGenericArgs != 0) { HRESULT hr = pModule->GetMDImport()->GetParentToken(methodDef, &typeDef); if (FAILED(hr)) COMPlusThrowHR(E_INVALIDARG); TypeHandle thClass = LoadClass(pModule, typeDef); _ASSERTE(!thClass.IsNull()); numGenericClassArgs = thClass.GetNumGenericArgs(); if (numGenericArgs < numGenericClassArgs) { COMPlusThrowHR(COR_E_TARGETPARAMCOUNT); } pGenericClassArgs = (numGenericClassArgs > 0) ? pGenericArgs : NULL; nGenericMethodArgs = (numGenericArgs >= numGenericClassArgs) ? (numGenericArgs - numGenericClassArgs) : 0; pGenericMethodArgs = (nGenericMethodArgs > 0) ? (pGenericArgs + numGenericClassArgs) : NULL; #ifdef FEATURE_COMINTEROP if (numGenericClassArgs > 0) { thOwner = ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(pGenericClassArgs, numGenericClassArgs)); // for classes supporting generic interop force remotable method descs forceRemotable = thOwner.GetMethodTable()->SupportsGenericInterop(TypeHandle::Interop_ManagedToNative); } #endif // FEATURE_COMINTEROP } MethodDesc *pRes = MemberLoader::GetMethodDescFromMethodDef(pModule, methodDef, Instantiation(pGenericClassArgs, numGenericClassArgs), Instantiation(pGenericMethodArgs, nGenericMethodArgs), forceRemotable); // The ownerType is extra information that augments the specification of an interface MD. // It is only needed if generics code sharing is supported, because otherwise MDs are // fully self-describing. 
if (pOwnerType != NULL) { if (numGenericClassArgs != 0) { if (thOwner.IsNull()) *pOwnerType = ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(pGenericClassArgs, numGenericClassArgs)); else *pOwnerType = thOwner; } else { *pOwnerType = TypeHandle(pRes->GetMethodTable()); } } RETURN (pRes); } TypeHandle EEDbgInterfaceImpl::FindLoadedClass(Module *pModule, mdTypeDef classToken) { CONTRACT(TypeHandle) { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); } CONTRACT_END; RETURN ClassLoader::LookupTypeDefOrRefInModule(pModule, classToken); } TypeHandle EEDbgInterfaceImpl::FindLoadedInstantiation(Module *pModule, mdTypeDef typeDef, DWORD ntypars, TypeHandle *inst) { // Lookup operations run the class loader in non-load mode. ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); // scan violation: asserts that this can be suppressed since there is currently // work on dac-izing all this code and as a result the issue will become moot. CONTRACT_VIOLATION(FaultViolation); return ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(inst, ntypars), ClassLoader::DontLoadTypes); } TypeHandle EEDbgInterfaceImpl::FindLoadedFnptrType(TypeHandle *inst, DWORD ntypars) { // Lookup operations run the class loader in non-load mode. ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); //<TODO> : CALLCONV? </TODO> return ClassLoader::LoadFnptrTypeThrowing(0, ntypars, inst, // <TODO> should this be FailIfNotLoaded? - NO - although we may // want to debug unrestored VCs, we can't do it because the debug API // is not set up to handle them </TODO> // == FailIfNotLoadedOrNotRestored ClassLoader::DontLoadTypes); } TypeHandle EEDbgInterfaceImpl::FindLoadedPointerOrByrefType(CorElementType et, TypeHandle elemtype) { // Lookup operations run the class loader in non-load mode. ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); return ClassLoader::LoadPointerOrByrefTypeThrowing(et, elemtype, // <TODO> should this be FailIfNotLoaded? 
- NO - although we may // want to debug unrestored VCs, we can't do it because the debug API // is not set up to handle them </TODO> // == FailIfNotLoadedOrNotRestored ClassLoader::DontLoadTypes); } TypeHandle EEDbgInterfaceImpl::FindLoadedArrayType(CorElementType et, TypeHandle elemtype, unsigned rank) { // Lookup operations run the class loader in non-load mode. ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); if (elemtype.IsNull()) return TypeHandle(); else return ClassLoader::LoadArrayTypeThrowing(elemtype, et, rank, // <TODO> should this be FailIfNotLoaded? - NO - although we may // want to debug unrestored VCs, we can't do it because the debug API // is not set up to handle them </TODO> // == FailIfNotLoadedOrNotRestored ClassLoader::DontLoadTypes ); } TypeHandle EEDbgInterfaceImpl::FindLoadedElementType(CorElementType et) { // Lookup operations run the class loader in non-load mode. ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); MethodTable *m = MscorlibBinder::GetElementType(et); return TypeHandle(m); } TypeHandle EEDbgInterfaceImpl::LoadClass(Module *pModule, mdTypeDef classToken) { CONTRACT(TypeHandle) { THROWS; GC_TRIGGERS; PRECONDITION(CheckPointer(pModule)); } CONTRACT_END; RETURN ClassLoader::LoadTypeDefOrRefThrowing(pModule, classToken, ClassLoader::ThrowIfNotFound, ClassLoader::PermitUninstDefOrRef); } TypeHandle EEDbgInterfaceImpl::LoadInstantiation(Module *pModule, mdTypeDef typeDef, DWORD ntypars, TypeHandle *inst) { CONTRACT(TypeHandle) { THROWS; GC_TRIGGERS; PRECONDITION(CheckPointer(pModule)); } CONTRACT_END; RETURN ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(inst, ntypars)); } TypeHandle EEDbgInterfaceImpl::LoadArrayType(CorElementType et, TypeHandle elemtype, unsigned rank) { CONTRACT(TypeHandle) { THROWS; GC_TRIGGERS; } CONTRACT_END; if (elemtype.IsNull()) RETURN TypeHandle(); else RETURN ClassLoader::LoadArrayTypeThrowing(elemtype, et, rank); } TypeHandle EEDbgInterfaceImpl::LoadPointerOrByrefType(CorElementType 
et, TypeHandle elemtype) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_TRIGGERS; } CONTRACTL_END; return ClassLoader::LoadPointerOrByrefTypeThrowing(et, elemtype); } TypeHandle EEDbgInterfaceImpl::LoadFnptrType(TypeHandle *inst, DWORD ntypars) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_TRIGGERS; } CONTRACTL_END; /* @TODO : CALLCONV? */ return ClassLoader::LoadFnptrTypeThrowing(0, ntypars, inst); } TypeHandle EEDbgInterfaceImpl::LoadElementType(CorElementType et) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_TRIGGERS; } CONTRACTL_END; MethodTable *m = MscorlibBinder::GetElementType(et); if (m == NULL) { return TypeHandle(); } return TypeHandle(m); } HRESULT EEDbgInterfaceImpl::GetMethodImplProps(Module *pModule, mdToken tk, DWORD *pRVA, DWORD *pImplFlags) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; return pModule->GetMDImport()->GetMethodImplProps(tk, pRVA, pImplFlags); } HRESULT EEDbgInterfaceImpl::GetParentToken(Module *pModule, mdToken tk, mdToken *pParentToken) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; return pModule->GetMDImport()->GetParentToken(tk, pParentToken); } void EEDbgInterfaceImpl::MarkDebuggerAttached(void) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; g_CORDebuggerControlFlags |= DBCF_ATTACHED; g_CORDebuggerControlFlags &= ~DBCF_PENDING_ATTACH; } void EEDbgInterfaceImpl::MarkDebuggerUnattached(void) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; g_CORDebuggerControlFlags &= ~DBCF_ATTACHED; } #ifdef EnC_SUPPORTED // Apply an EnC edit to the specified module HRESULT EEDbgInterfaceImpl::EnCApplyChanges(EditAndContinueModule *pModule, DWORD cbMetadata, BYTE *pMetadata, DWORD cbIL, BYTE *pIL) { LOG((LF_ENC, LL_INFO100, "EncApplyChanges\n")); CONTRACTL { SO_NOT_MAINLINE; DISABLED(THROWS); DISABLED(GC_TRIGGERS); PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; return 
pModule->ApplyEditAndContinue(cbMetadata, pMetadata, cbIL, pIL); } // Remap execution to the latest version of an edited method // This function should never return. void EEDbgInterfaceImpl::ResumeInUpdatedFunction(EditAndContinueModule *pModule, MethodDesc *pFD, void *debuggerFuncHandle, SIZE_T resumeIP, CONTEXT *pContext) { CONTRACTL { SO_NOT_MAINLINE; DISABLED(THROWS); DISABLED(GC_TRIGGERS); PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; pModule->ResumeInUpdatedFunction(pFD, debuggerFuncHandle, resumeIP, pContext); } #endif // EnC_SUPPORTED bool EEDbgInterfaceImpl::CrawlFrameIsGcSafe(CrawlFrame *pCF) { CONTRACTL { SO_NOT_MAINLINE; SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pCF)); } CONTRACTL_END; return pCF->IsGcSafe(); } bool EEDbgInterfaceImpl::IsStub(const BYTE *ip) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // IsStub will catch any exceptions and return false. return StubManager::IsStub((PCODE) ip) != FALSE; } #endif // #ifndef DACCESS_COMPILE // static bool EEDbgInterfaceImpl::DetectHandleILStubs(Thread *thread) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return thread->DetectHandleILStubsForDebugger(); } bool EEDbgInterfaceImpl::TraceStub(const BYTE *ip, TraceDestination *trace) { #ifndef DACCESS_COMPILE CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return StubManager::TraceStub((PCODE) ip, trace) != FALSE; #else DacNotImpl(); return false; #endif // #ifndef DACCESS_COMPILE } #ifndef DACCESS_COMPILE bool EEDbgInterfaceImpl::FollowTrace(TraceDestination *trace) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; return StubManager::FollowTrace(trace) != FALSE; } bool EEDbgInterfaceImpl::TraceFrame(Thread *thread, Frame *frame, BOOL fromPatch, TraceDestination *trace, REGDISPLAY *regs) { CONTRACTL { SO_NOT_MAINLINE; THROWS; DISABLED(GC_TRIGGERS); // This is not a bug - the debugger can call this on an un-managed thread. 
PRECONDITION(CheckPointer(frame)); } CONTRACTL_END; bool fResult = frame->TraceFrame(thread, fromPatch, trace, regs) != FALSE; #ifdef _DEBUG StubManager::DbgWriteLog("Doing TraceFrame on frame=0x%p (fromPatch=%d), yeilds:\n", frame, fromPatch); if (fResult) { SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; FAULT_NOT_FATAL(); SString buffer; StubManager::DbgWriteLog(" td=%S\n", trace->DbgToString(buffer)); } else { StubManager::DbgWriteLog(" false (this frame does not expect to call managed code).\n"); } #endif return fResult; } bool EEDbgInterfaceImpl::TraceManager(Thread *thread, StubManager *stubManager, TraceDestination *trace, CONTEXT *context, BYTE **pRetAddr) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_TRIGGERS; PRECONDITION(CheckPointer(stubManager)); } CONTRACTL_END; bool fResult = false; EX_TRY { fResult = stubManager->TraceManager(thread, trace, context, pRetAddr) != FALSE; } EX_CATCH { // We never expect TraceManager() to fail and throw an exception, // so we should never hit this assertion. 
_ASSERTE(!"Fail to trace a stub through TraceManager()"); fResult = false; } EX_END_CATCH(SwallowAllExceptions); #ifdef _DEBUG StubManager::DbgWriteLog("Doing TraceManager on %s (0x%p) for IP=0x%p, yields:\n", stubManager->DbgGetName(), stubManager, GetIP(context)); if (fResult) { // Should never be on helper thread FAULT_NOT_FATAL(); SString buffer; StubManager::DbgWriteLog(" td=%S\n", trace->DbgToString(buffer)); } else { StubManager::DbgWriteLog(" false (this stub does not expect to call managed code).\n"); } #endif return fResult; } void EEDbgInterfaceImpl::EnableTraceCall(Thread *thread) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); } CONTRACTL_END; thread->IncrementTraceCallCount(); } void EEDbgInterfaceImpl::DisableTraceCall(Thread *thread) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(thread)); } CONTRACTL_END; thread->DecrementTraceCallCount(); } #ifdef FEATURE_IMPLICIT_TLS EXTERN_C UINT32 _tls_index; #endif void EEDbgInterfaceImpl::GetRuntimeOffsets(SIZE_T *pTLSIndex, SIZE_T *pTLSIsSpecialIndex, SIZE_T *pTLSCantStopIndex, SIZE_T* pTLSIndexOfPredefs, SIZE_T *pEEThreadStateOffset, SIZE_T *pEEThreadStateNCOffset, SIZE_T *pEEThreadPGCDisabledOffset, DWORD *pEEThreadPGCDisabledValue, SIZE_T *pEEThreadDebuggerWordOffset, SIZE_T *pEEThreadFrameOffset, SIZE_T *pEEThreadMaxNeededSize, DWORD *pEEThreadSteppingStateMask, DWORD *pEEMaxFrameValue, SIZE_T *pEEThreadDebuggerFilterContextOffset, SIZE_T *pEEThreadCantStopOffset, SIZE_T *pEEFrameNextOffset, DWORD *pEEIsManagedExceptionStateMask) { CONTRACTL { SO_INTOLERANT; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pTLSIndex)); PRECONDITION(CheckPointer(pTLSIsSpecialIndex)); PRECONDITION(CheckPointer(pEEThreadStateOffset)); PRECONDITION(CheckPointer(pTLSIndexOfPredefs)); PRECONDITION(CheckPointer(pEEThreadStateNCOffset)); PRECONDITION(CheckPointer(pEEThreadPGCDisabledOffset)); PRECONDITION(CheckPointer(pEEThreadPGCDisabledValue)); 
PRECONDITION(CheckPointer(pEEThreadDebuggerWordOffset)); PRECONDITION(CheckPointer(pEEThreadFrameOffset)); PRECONDITION(CheckPointer(pEEThreadMaxNeededSize)); PRECONDITION(CheckPointer(pEEThreadSteppingStateMask)); PRECONDITION(CheckPointer(pEEMaxFrameValue)); PRECONDITION(CheckPointer(pEEThreadDebuggerFilterContextOffset)); PRECONDITION(CheckPointer(pEEThreadCantStopOffset)); PRECONDITION(CheckPointer(pEEFrameNextOffset)); PRECONDITION(CheckPointer(pEEIsManagedExceptionStateMask)); } CONTRACTL_END; #ifdef FEATURE_IMPLICIT_TLS *pTLSIndex = _tls_index; #else *pTLSIndex = GetThreadTLSIndex(); #endif *pTLSIsSpecialIndex = TlsIdx_ThreadType; *pTLSCantStopIndex = TlsIdx_CantStopCount; *pTLSIndexOfPredefs = CExecutionEngine::TlsIndex; *pEEThreadStateOffset = Thread::GetOffsetOfState(); *pEEThreadStateNCOffset = Thread::GetOffsetOfStateNC(); *pEEThreadPGCDisabledOffset = Thread::GetOffsetOfGCFlag(); *pEEThreadPGCDisabledValue = 1; // A little obvious, but just in case... *pEEThreadDebuggerWordOffset = Thread::GetOffsetOfDebuggerWord(); *pEEThreadFrameOffset = Thread::GetOffsetOfCurrentFrame(); *pEEThreadMaxNeededSize = sizeof(Thread); *pEEThreadDebuggerFilterContextOffset = Thread::GetOffsetOfDebuggerFilterContext(); *pEEThreadCantStopOffset = Thread::GetOffsetOfCantStop(); *pEEThreadSteppingStateMask = Thread::TSNC_DebuggerIsStepping; *pEEMaxFrameValue = (DWORD)(size_t)FRAME_TOP; // <TODO> should this be size_t for 64bit?</TODO> *pEEFrameNextOffset = Frame::GetOffsetOfNextLink(); *pEEIsManagedExceptionStateMask = Thread::TSNC_DebuggerIsManagedException; } void EEDbgInterfaceImpl::DebuggerModifyingLogSwitch (int iNewLevel, const WCHAR *pLogSwitchName) { CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_NOTRIGGER; } CONTRACTL_END; Log::DebuggerModifyingLogSwitch (iNewLevel, pLogSwitchName); } HRESULT EEDbgInterfaceImpl::SetIPFromSrcToDst(Thread *pThread, SLOT addrStart, DWORD offFrom, DWORD offTo, bool fCanSetIPOnly, PREGDISPLAY pReg, PCONTEXT pCtx, void *pDji, EHRangeTree *pEHRT) { 
CONTRACTL { SO_NOT_MAINLINE; THROWS; GC_TRIGGERS; } CONTRACTL_END; return ::SetIPFromSrcToDst(pThread, addrStart, offFrom, offTo, fCanSetIPOnly, pReg, pCtx, pDji, pEHRT); } void EEDbgInterfaceImpl::SetDebugState(Thread *pThread, CorDebugThreadState state) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; _ASSERTE(state == THREAD_SUSPEND || state == THREAD_RUN); LOG((LF_CORDB,LL_INFO10000,"EEDbg:Setting thread 0x%x (ID:0x%x) to 0x%x\n", pThread, pThread->GetThreadId(), state)); if (state == THREAD_SUSPEND) { pThread->SetThreadStateNC(Thread::TSNC_DebuggerUserSuspend); } else { pThread->ResetThreadStateNC(Thread::TSNC_DebuggerUserSuspend); } } void EEDbgInterfaceImpl::SetAllDebugState(Thread *et, CorDebugThreadState state) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; Thread *pThread = NULL; while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL) { if (pThread != et) { SetDebugState(pThread, state); } } } // This is pretty much copied from VM\COMSynchronizable's // INT32 __stdcall ThreadNative::GetThreadState, so propogate changes // to both functions // This just gets the user state from the EE's perspective (hence "partial"). CorDebugUserState EEDbgInterfaceImpl::GetPartialUserState(Thread *pThread) { CONTRACTL { SO_NOT_MAINLINE; NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; Thread::ThreadState ts = pThread->GetSnapshotState(); unsigned ret = 0; if (ts & Thread::TS_Background) { ret |= (unsigned)USER_BACKGROUND; } if (ts & Thread::TS_Unstarted) { ret |= (unsigned)USER_UNSTARTED; } // Don't report a StopRequested if the thread has actually stopped. if (ts & Thread::TS_Dead) { ret |= (unsigned)USER_STOPPED; } if (ts & Thread::TS_Interruptible) { ret |= (unsigned)USER_WAIT_SLEEP_JOIN; } // Don't report a SuspendRequested if the thread has actually Suspended. 
if ( ((ts & Thread::TS_UserSuspendPending) && (ts & Thread::TS_SyncSuspended))) { ret |= (unsigned)USER_SUSPENDED; } else if (ts & Thread::TS_UserSuspendPending) { ret |= (unsigned)USER_SUSPEND_REQUESTED; } LOG((LF_CORDB,LL_INFO1000, "EEDbgII::GUS: thread 0x%x (id:0x%x)" " userThreadState is 0x%x\n", pThread, pThread->GetThreadId(), ret)); return (CorDebugUserState)ret; } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void EEDbgInterfaceImpl::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { DAC_ENUM_VTHIS(); } #endif unsigned EEDbgInterfaceImpl::GetSizeForCorElementType(CorElementType etyp) { WRAPPER_NO_CONTRACT; return (::GetSizeForCorElementType(etyp)); } #ifndef DACCESS_COMPILE /* * ObjIsInstanceOf * * This method supplies the internal VM implementation of this method to the * debugger left-side. * */ BOOL EEDbgInterfaceImpl::ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd) { WRAPPER_NO_CONTRACT; return (::ObjIsInstanceOf(pElement, toTypeHnd)); } #endif /* * ClearAllDebugInterfaceReferences * * This method is called by the debugging part of the runtime to notify * that the debugger resources are no longer valid and any internal references * to it must be null'ed out. * * Parameters: * None. * * Returns: * None. * */ void EEDbgInterfaceImpl::ClearAllDebugInterfaceReferences() { LIMITED_METHOD_CONTRACT; g_pDebugInterface = NULL; } #ifndef DACCESS_COMPILE #ifdef _DEBUG /* * ObjectRefFlush * * Flushes all debug tracking information for object referencing. * * Parameters: * pThread - The target thread to flush object references of. * * Returns: * None. * */ void EEDbgInterfaceImpl::ObjectRefFlush(Thread *pThread) { WRAPPER_NO_CONTRACT; Thread::ObjectRefFlush(pThread); } #endif #endif #endif // DEBUGGING_SUPPORTED
kangaroo/coreclr
src/vm/eedbginterfaceimpl.cpp
C++
mit
45,676
<?php namespace Codeception\Module; // here you can define custom functions for CodeGuy class EmulateModuleHelper extends \Codeception\Module { public $assertions = 0; public function seeEquals($expected, $actual) { \PHPUnit_Framework_Assert::assertEquals($expected, $actual); $this->assertions++; } public function seeFeaturesEquals($expected) { \PHPUnit_Framework_Assert::assertEquals($expected, $this->scenario->getFeature()); } public function _before(\Codeception\TestCase $test) { $this->scenario = $test->getScenario(); } }
KannipaB/be-grav
vendor/codeception/codeception/tests/support/EmulateModuleHelper.php
PHP
mit
602
/* * webui popover plugin - v1.1.3 * A lightWeight popover plugin with jquery ,enchance the popover plugin of bootstrap with some awesome new features. It works well with bootstrap ,but bootstrap is not necessary! * https://github.com/sandywalker/webui-popover * * Made by Sandy Duan * Under MIT License */ ;(function ( $, window, document, undefined ) { // Create the defaults once var pluginName = 'webuiPopover'; var pluginClass = 'webui-popover'; var pluginType = 'webui.popover'; var defaults = { placement:'auto', width:'auto', height:'auto', trigger:'click', style:'', delay: { show: null, hide: null }, async: { before: null, //function(that, xhr){} success: null //function(that, xhr){} }, cache:true, multi:false, arrow:true, title:'', content:'', closeable:false, padding:true, url:'', type:'html', constrains:null, animation:null, template:'<div class="webui-popover">'+ '<div class="arrow"></div>'+ '<div class="webui-popover-inner">'+ '<a href="#" class="close">x</a>'+ '<h3 class="webui-popover-title"></h3>'+ '<div class="webui-popover-content"><i class="icon-refresh"></i> <p>&nbsp;</p></div>'+ '</div>'+ '</div>' }; var _globalIdSeed = 0; // The actual plugin constructor function WebuiPopover ( element, options ) { this.$element = $(element); if (options){ if($.type(options.delay) === 'string' || $.type(options.delay) === 'number') { options.delay = {show:options.delay,hide:options.delay}; // bc break fix } } this.options = $.extend( {}, defaults, options ); this._defaults = defaults; this._name = pluginName; this._targetclick = false; this.init(); } WebuiPopover.prototype = { //init webui popover init: function () { //init the event handlers if (this.getTrigger()==='click'){ this.$element.off('click').on('click',$.proxy(this.toggle,this)); }else if (this.getTrigger()==='hover'){ this.$element.off('mouseenter mouseleave click') .on('mouseenter',$.proxy(this.mouseenterHandler,this)) .on('mouseleave',$.proxy(this.mouseleaveHandler,this)) 
.on('click',function(e){e.stopPropagation();}); } this._poped = false; this._inited = true; this._idSeed = _globalIdSeed; _globalIdSeed++; }, /* api methods and actions */ destroy:function(){ this.hide(); this.$element.data('plugin_'+pluginName,null); if (this.getTrigger()==='click'){ this.$element.off('click'); }else if (this.getTrigger()==='hover'){ this.$element.off('mouseenter mouseleave'); } if (this.$target){ this.$target.remove(); } }, hide:function(event){ if (event){ event.preventDefault(); event.stopPropagation(); } if (this.xhr){ this.xhr.abort(); this.xhr = null; } var e = $.Event('hide.' + pluginType); this.$element.trigger(e); if (this.$target){this.$target.removeClass('in').hide();} this.$element.trigger('hidden.'+pluginType); }, toggle:function(e){ if (e) { e.preventDefault(); e.stopPropagation(); } this[this.getTarget().hasClass('in') ? 'hide' : 'show'](); }, hideAll:function(){ $('div.webui-popover').not('.webui-popover-fixed').removeClass('in').hide(); }, /*core method ,show popover */ show:function(){ var $target = this.getTarget().removeClass().addClass(pluginClass); if (!this.options.multi){ this.hideAll(); } // use cache by default, if not cache setted , reInit the contents if (!this.getCache()||!this._poped){ this.content = ''; this.setTitle(this.getTitle()); if (!this.options.closeable){ $target.find('.close').off('click').remove(); } if (!this.isAsync()){ this.setContent(this.getContent()); }else{ this.setContentASync(this.options.content); this.displayContent(); return; } $target.show(); } this.displayContent(); this.bindBodyEvents(); }, displayContent:function(){ var //element postion elementPos = this.getElementPosition(), //target postion $target = this.getTarget().removeClass().addClass(pluginClass), //target content $targetContent = this.getContentElement(), //target Width targetWidth = $target[0].offsetWidth, //target Height targetHeight = $target[0].offsetHeight, //placement placement = 'bottom', e = $.Event('show.' 
+ pluginType); //if (this.hasContent()){ this.$element.trigger(e); //} if (this.options.width!=='auto') {$target.width(this.options.width);} if (this.options.height!=='auto'){$targetContent.height(this.options.height);} //init the popover and insert into the document body if (!this.options.arrow){ $target.find('.arrow').remove(); } $target.remove().css({ top: -2000, left: -2000, display: 'block' }); if (this.getAnimation()){ $target.addClass(this.getAnimation()); } $target.appendTo(document.body); targetWidth = $target[0].offsetWidth; targetHeight = $target[0].offsetHeight; placement = this.getPlacement(elementPos); this.initTargetEvents(); var postionInfo = this.getTargetPositin(elementPos,placement,targetWidth,targetHeight); this.$target.css(postionInfo.position).addClass(placement).addClass('in'); if (this.options.type==='iframe'){ var $iframe = $target.find('iframe'); $iframe.width($target.width()).height($iframe.parent().height()); } if (this.options.style){ this.$target.addClass(pluginClass+'-'+this.options.style); } if (!this.options.padding){ $targetContent.css('height',$targetContent.outerHeight()); this.$target.addClass('webui-no-padding'); } if (!this.options.arrow){ this.$target.css({'margin':0}); } if (this.options.arrow){ var $arrow = this.$target.find('.arrow'); $arrow.removeAttr('style'); if (postionInfo.arrowOffset){ $arrow.css(postionInfo.arrowOffset); } } this._poped = true; this.$element.trigger('shown.'+pluginType); }, isTargetLoaded:function(){ return this.getTarget().find('i.glyphicon-refresh').length===0; }, /*getter setters */ getTriggerElement:function(){ return this.$element; }, getTarget:function(){ if (!this.$target){ var id = pluginName+this._idSeed; this.$target = $(this.options.template) .attr('id',id) .data('trigger-element',this.getTriggerElement()); this.getTriggerElement().attr('data-target',id); } return this.$target; }, getTitleElement:function(){ return this.getTarget().find('.'+pluginClass+'-title'); }, 
getContentElement:function(){ return this.getTarget().find('.'+pluginClass+'-content'); }, getTitle:function(){ return this.$element.attr('data-title')||this.options.title||this.$element.attr('title'); }, getUrl:function(){ return this.$element.attr('data-url')||this.options.url; }, getCache:function(){ var dataAttr = this.$element.attr('data-cache'); if (typeof(dataAttr) !== 'undefined') { switch(dataAttr.toLowerCase()){ case 'true': case 'yes': case '1': return true; case 'false': case 'no': case '0': return false; } } return this.options.cache; }, getTrigger:function(){ return this.$element.attr('data-trigger')||this.options.trigger; }, getDelayShow:function(){ var dataAttr = this.$element.attr('data-delay-show'); if (typeof(dataAttr) !== 'undefined') { return dataAttr; } return this.options.delay.show===0?0:this.options.delay.show||100; }, getHideDelay:function(){ var dataAttr = this.$element.attr('data-delay-hide'); if (typeof(dataAttr) !== 'undefined') { return dataAttr; } return this.options.delay.hide===0?0:this.options.delay.hide||100; }, getConstrains:function(){ var dataAttr = this.$element.attr('data-contrains'); if (typeof(dataAttr) !== 'undefined') { return dataAttr; } return this.options.constrains; }, getAnimation:function(){ var dataAttr = this.$element.attr('data-animation'); return dataAttr||this.options.animation; }, setTitle:function(title){ var $titleEl = this.getTitleElement(); if (title){ $titleEl.html(title); }else{ $titleEl.remove(); } }, hasContent:function () { return this.getContent(); }, getContent:function(){ if (this.getUrl()){ if (this.options.type==='iframe'){ this.content = $('<iframe frameborder="0"></iframe>').attr('src',this.getUrl()); } }else if (!this.content){ var content=''; if ($.isFunction(this.options.content)){ content = this.options.content.apply(this.$element[0],arguments); }else{ content = this.options.content; } this.content = this.$element.attr('data-content')||content; } return this.content; }, 
setContent:function(content){ var $target = this.getTarget(); this.getContentElement().html(content); this.$target = $target; }, isAsync:function(){ return this.options.type==='async'; }, setContentASync:function(content){ var that = this; this.xhr = $.ajax({ url:this.getUrl(), type:'GET', cache:this.getCache(), beforeSend:function(xhr) { if (that.options.async.before){ that.options.async.before(that, xhr); } }, success:function(data){ that.bindBodyEvents(); if (content&&$.isFunction(content)){ that.content = content.apply(that.$element[0],[data]); }else{ that.content = data; } that.setContent(that.content); var $targetContent = that.getContentElement(); $targetContent.removeAttr('style'); that.displayContent(); if (that.options.async.success){ that.options.async.success(that, data); } this.xhr = null; } }); }, bindBodyEvents:function(){ $('body').off('keyup.webui-popover').on('keyup.webui-popover',$.proxy(this.escapeHandler,this)); $('body').off('click.webui-popover').on('click.webui-popover',$.proxy(this.bodyClickHandler,this)); }, /* event handlers */ mouseenterHandler:function(){ var self = this; if (self._timeout){clearTimeout(self._timeout);} self._enterTimeout = setTimeout(function(){ if (!self.getTarget().is(':visible')){self.show();} },this.getDelayShow()); }, mouseleaveHandler:function(){ var self = this; clearTimeout(self._enterTimeout); //key point, set the _timeout then use clearTimeout when mouse leave self._timeout = setTimeout(function(){ self.hide(); },this.getHideDelay()); }, escapeHandler:function(e){ if (e.keyCode===27){ this.hideAll(); } }, bodyClickHandler:function(){ if (this.getTrigger()==='click'){ if (this._targetclick){ this._targetclick = false; }else{ this.hideAll(); } } }, targetClickHandler:function(){ this._targetclick = true; }, //reset and init the target events; initTargetEvents:function(){ if (this.getTrigger()==='hover'){ this.$target.off('mouseenter mouseleave') .on('mouseenter',$.proxy(this.mouseenterHandler,this)) 
.on('mouseleave',$.proxy(this.mouseleaveHandler,this)); } this.$target.find('.close').off('click').on('click', $.proxy(this.hide,this)); this.$target.off('click.webui-popover').on('click.webui-popover',$.proxy(this.targetClickHandler,this)); }, /* utils methods */ //caculate placement of the popover getPlacement:function(pos){ var placement, de = document.documentElement, db = document.body, clientWidth = de.clientWidth, clientHeight = de.clientHeight, scrollTop = Math.max(db.scrollTop,de.scrollTop), scrollLeft = Math.max(db.scrollLeft,de.scrollLeft), pageX = Math.max(0,pos.left - scrollLeft), pageY = Math.max(0,pos.top - scrollTop); //arrowSize = 20; //if placement equals auto,caculate the placement by element information; if (typeof(this.options.placement)==='function'){ placement = this.options.placement.call(this, this.getTarget()[0], this.$element[0]); }else{ placement = this.$element.data('placement')||this.options.placement; } if (placement==='auto'){ var constrainsH = this.getConstrains() === 'horizontal', constrainsV = this.getConstrains() === 'vertical'; if (pageX<clientWidth/3){ if (pageY<clientHeight/3){ placement = constrainsH?'right-bottom':'bottom-right'; }else if (pageY<clientHeight*2/3){ if (constrainsV){ placement = pageY<=clientHeight/2?'bottom-right':'top-right'; }else{ placement = 'right'; } }else{ placement =constrainsH?'right-top':'top-right'; } //placement= pageY>targetHeight+arrowSize?'top-right':'bottom-right'; }else if (pageX<clientWidth*2/3){ if (pageY<clientHeight/3){ if (constrainsH){ placement =pageX<=clientWidth/2?'right-bottom':'left-bottom'; }else{ placement ='bottom'; } }else if (pageY<clientHeight*2/3){ if (constrainsH){ placement = pageX<=clientWidth/2?'right':'left'; }else{ placement = pageY<=clientHeight/2?'bottom':'top'; } }else{ if (constrainsH){ placement =pageX<=clientWidth/2?'right-top':'left-top'; }else{ placement ='top'; } } }else{ //placement = pageY>targetHeight+arrowSize?'top-left':'bottom-left'; if 
(pageY<clientHeight/3){ placement = constrainsH?'left-bottom':'bottom-left'; }else if (pageY<clientHeight*2/3){ if (constrainsV){ placement = pageY<=clientHeight/2?'bottom-left':'top-left'; }else{ placement = 'left'; } }else{ placement = constrainsH?'left-top':'top-left'; } } }else if (placement==='auto-top'){ if (pageX<clientWidth/3){ placement='top-right'; }else if (pageX<clientHeight*2/3){ placement='top'; }else{ placement='top-left'; } }else if (placement==='auto-bottom'){ if (pageX<clientWidth/3){ placement='bottom-right'; }else if (pageX<clientHeight*2/3){ placement='bottom'; }else{ placement='bottom-left'; } }else if (placement==='auto-left'){ if (pageY<clientHeight/3){ placement='left-top'; }else if (pageY<clientHeight*2/3){ placement='left'; }else{ placement='left-bottom'; } }else if (placement==='auto-right'){ if (pageY<clientHeight/3){ placement='right-top'; }else if (pageY<clientHeight*2/3){ placement='right'; }else{ placement='right-bottom'; } } return placement; }, getElementPosition:function(){ return $.extend({},this.$element.offset(), { width: this.$element[0].offsetWidth, height: this.$element[0].offsetHeight }); }, getTargetPositin:function(elementPos,placement,targetWidth,targetHeight){ var pos = elementPos, elementW = this.$element.outerWidth(), elementH = this.$element.outerHeight(), position={}, arrowOffset=null, arrowSize = this.options.arrow?20:0, fixedW = elementW<arrowSize+10?arrowSize:0, fixedH = elementH<arrowSize+10?arrowSize:0; switch (placement) { case 'bottom': position = {top: pos.top + pos.height, left: pos.left + pos.width / 2 - targetWidth / 2}; break; case 'top': position = {top: pos.top - targetHeight, left: pos.left + pos.width / 2 - targetWidth / 2}; break; case 'left': position = {top: pos.top + pos.height / 2 - targetHeight / 2, left: pos.left - targetWidth}; break; case 'right': position = {top: pos.top + pos.height / 2 - targetHeight / 2, left: pos.left + pos.width}; break; case 'top-right': position = {top: pos.top - 
targetHeight, left: pos.left-fixedW}; arrowOffset = {left: Math.min(elementW,targetWidth)/2 + fixedW}; break; case 'top-left': position = {top: pos.top - targetHeight, left: pos.left -targetWidth +pos.width + fixedW}; arrowOffset = {left: targetWidth - Math.min(elementW,targetWidth) /2 -fixedW}; break; case 'bottom-right': position = {top: pos.top + pos.height, left: pos.left-fixedW}; arrowOffset = {left: Math.min(elementW,targetWidth) /2+fixedW}; break; case 'bottom-left': position = {top: pos.top + pos.height, left: pos.left -targetWidth +pos.width+fixedW}; arrowOffset = {left: targetWidth- Math.min(elementW,targetWidth) /2 - fixedW}; break; case 'right-top': position = {top: pos.top -targetHeight + pos.height + fixedH, left: pos.left + pos.width}; arrowOffset = {top: targetHeight - Math.min(elementH,targetHeight)/2 -fixedH}; break; case 'right-bottom': position = {top: pos.top - fixedH, left: pos.left + pos.width}; arrowOffset = {top: Math.min(elementH,targetHeight) /2 +fixedH }; break; case 'left-top': position = {top: pos.top -targetHeight + pos.height+fixedH, left: pos.left - targetWidth}; arrowOffset = {top: targetHeight - Math.min(elementH,targetHeight)/2 - fixedH}; break; case 'left-bottom': position = {top: pos.top -fixedH , left: pos.left -targetWidth}; arrowOffset = {top: Math.min(elementH,targetHeight) /2 + fixedH }; break; } return {position:position,arrowOffset:arrowOffset}; } }; $.fn[ pluginName ] = function ( options ) { return this.each(function() { var webuiPopover = $.data( this, 'plugin_' + pluginName ); if (!webuiPopover) { if (!options){ webuiPopover = new WebuiPopover( this, null); }else if (typeof options ==='string'){ if (options!=='destroy'){ webuiPopover = new WebuiPopover( this, null ); webuiPopover[options](); } }else if (typeof options ==='object'){ webuiPopover = new WebuiPopover( this, options ); } $.data( this, 'plugin_' + pluginName, webuiPopover); }else{ if (options==='destroy'){ webuiPopover.destroy(); }else if (typeof options 
==='string'){ webuiPopover[options](); } } }); }; })( jQuery, window, document );
viskin/cdnjs
ajax/libs/webui-popover/1.1.4/jquery.webui-popover.js
JavaScript
mit
20,220
// Copyright Alexander Nasonov & Paul A. Bristow 2006. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt // or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_DETAIL_LCAST_PRECISION_HPP_INCLUDED #define BOOST_DETAIL_LCAST_PRECISION_HPP_INCLUDED #include <climits> #include <ios> #include <limits> #include <boost/config.hpp> #include <boost/integer_traits.hpp> #ifndef BOOST_NO_IS_ABSTRACT // Fix for SF:1358600 - lexical_cast & pure virtual functions & VC 8 STL #include <boost/mpl/if.hpp> #include <boost/type_traits/is_abstract.hpp> #endif #if defined(BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS) || \ (defined(BOOST_MSVC) && (BOOST_MSVC<1310)) #define BOOST_LCAST_NO_COMPILE_TIME_PRECISION #endif #ifdef BOOST_LCAST_NO_COMPILE_TIME_PRECISION #include <boost/assert.hpp> #else #include <boost/static_assert.hpp> #endif namespace boost { namespace detail { class lcast_abstract_stub {}; #ifndef BOOST_LCAST_NO_COMPILE_TIME_PRECISION // Calculate an argument to pass to std::ios_base::precision from // lexical_cast. See alternative implementation for broken standard // libraries in lcast_get_precision below. Keep them in sync, please. template<class T> struct lcast_precision { #ifdef BOOST_NO_IS_ABSTRACT typedef std::numeric_limits<T> limits; // No fix for SF:1358600. 
#else typedef BOOST_DEDUCED_TYPENAME boost::mpl::if_< boost::is_abstract<T> , std::numeric_limits<lcast_abstract_stub> , std::numeric_limits<T> >::type limits; #endif BOOST_STATIC_CONSTANT(bool, use_default_precision = !limits::is_specialized || limits::is_exact ); BOOST_STATIC_CONSTANT(bool, is_specialized_bin = !use_default_precision && limits::radix == 2 && limits::digits > 0 ); BOOST_STATIC_CONSTANT(bool, is_specialized_dec = !use_default_precision && limits::radix == 10 && limits::digits10 > 0 ); BOOST_STATIC_CONSTANT(std::streamsize, streamsize_max = boost::integer_traits<std::streamsize>::const_max ); BOOST_STATIC_CONSTANT(unsigned int, precision_dec = limits::digits10 + 1U); BOOST_STATIC_ASSERT(!is_specialized_dec || precision_dec <= streamsize_max + 0UL ); BOOST_STATIC_CONSTANT(unsigned long, precision_bin = 2UL + limits::digits * 30103UL / 100000UL ); BOOST_STATIC_ASSERT(!is_specialized_bin || (limits::digits + 0UL < ULONG_MAX / 30103UL && precision_bin > limits::digits10 + 0UL && precision_bin <= streamsize_max + 0UL) ); BOOST_STATIC_CONSTANT(std::streamsize, value = is_specialized_bin ? precision_bin : is_specialized_dec ? precision_dec : 6 ); }; #endif template<class T> inline std::streamsize lcast_get_precision(T* = 0) { #ifndef BOOST_LCAST_NO_COMPILE_TIME_PRECISION return lcast_precision<T>::value; #else // Follow lcast_precision algorithm at run-time: #ifdef BOOST_NO_IS_ABSTRACT typedef std::numeric_limits<T> limits; // No fix for SF:1358600. #else typedef BOOST_DEDUCED_TYPENAME boost::mpl::if_< boost::is_abstract<T> , std::numeric_limits<lcast_abstract_stub> , std::numeric_limits<T> >::type limits; #endif bool const use_default_precision = !limits::is_specialized || limits::is_exact; if(!use_default_precision) { // Includes all built-in floating-point types, float, double ... 
// and UDT types for which digits (significand bits) is defined (not zero) bool const is_specialized_bin = limits::radix == 2 && limits::digits > 0; bool const is_specialized_dec = limits::radix == 10 && limits::digits10 > 0; std::streamsize const streamsize_max = (boost::integer_traits<std::streamsize>::max)(); if(is_specialized_bin) { // Floating-point types with // limits::digits defined by the specialization. unsigned long const digits = limits::digits; unsigned long const precision = 2UL + digits * 30103UL / 100000UL; // unsigned long is selected because it is at least 32-bits // and thus ULONG_MAX / 30103UL is big enough for all types. BOOST_ASSERT( digits < ULONG_MAX / 30103UL && precision > limits::digits10 + 0UL && precision <= streamsize_max + 0UL ); return precision; } else if(is_specialized_dec) { // Decimal Floating-point type, most likely a User Defined Type // rather than a real floating-point hardware type. unsigned int const precision = limits::digits10 + 1U; BOOST_ASSERT(precision <= streamsize_max + 0UL); return precision; } } // Integral type (for which precision has no effect) // or type T for which limits is NOT specialized, // so assume stream precision remains the default 6 decimal digits. // Warning: if your User-defined Floating-point type T is NOT specialized, // then you may lose accuracy by only using 6 decimal digits. // To avoid this, you need to specialize T with either // radix == 2 and digits == the number of significand bits, // OR // radix = 10 and digits10 == the number of decimal digits. return 6; #endif } template<class T> inline void lcast_set_precision(std::ios_base& stream, T*) { stream.precision(lcast_get_precision<T>()); } template<class Source, class Target> inline void lcast_set_precision(std::ios_base& stream, Source*, Target*) { std::streamsize const s = lcast_get_precision((Source*)0); std::streamsize const t = lcast_get_precision((Target*)0); stream.precision(s > t ? 
s : t); } }} #endif // BOOST_DETAIL_LCAST_PRECISION_HPP_INCLUDED
Ezeer/VegaStrike_win32FR
vegastrike/boost/1_35/boost/detail/lcast_precision.hpp
C++
mit
5,996
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define DEBUG #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/poll.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/rwsem.h> #include <linux/ipc_logging.h> #include <linux/uaccess.h> #include <linux/ipc_router.h> #include <linux/ipc_router_xprt.h> #include <mach/subsystem_notif.h> #include <asm/byteorder.h> #if defined(CONFIG_ARCH_MSM) #include <soc/qcom/smem_log.h> #endif #include "ipc_router_private.h" #include "ipc_router_security.h" enum { #if defined(CONFIG_ARCH_MSM) SMEM_LOG = 1U << 0, #endif RTR_DBG = 1U << 1, R2R_MSG = 1U << 2, R2R_RAW = 1U << 3, NTFY_MSG = 1U << 4, R2R_RAW_HDR = 1U << 5, }; static int msm_ipc_router_debug_mask; module_param_named(debug_mask, msm_ipc_router_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); static void *ipc_rtr_log_ctxt; #define IPC_RTR_LOG_PAGES 5 #define DIAG(x...) pr_info("[RR] ERROR " x) #if defined(DEBUG) #define D(x...) do { \ if (ipc_rtr_log_ctxt) \ ipc_log_string(ipc_rtr_log_ctxt, x); \ if (msm_ipc_router_debug_mask & RTR_DBG) \ pr_info(x); \ } while (0) #define RR(x...) 
do { \ if (ipc_rtr_log_ctxt) \ ipc_log_string(ipc_rtr_log_ctxt, x); \ if (msm_ipc_router_debug_mask & R2R_MSG) \ pr_info("[RR] "x); \ } while (0) #define RAW(x...) do { \ if (msm_ipc_router_debug_mask & R2R_RAW) \ pr_info("[RAW] "x); \ } while (0) #define NTFY(x...) do { \ if (msm_ipc_router_debug_mask & NTFY_MSG) \ pr_info("[NOTIFY] "x); \ } while (0) #define RAW_HDR(x...) do { \ if (msm_ipc_router_debug_mask & R2R_RAW_HDR) \ pr_info("[HDR] "x); \ } while (0) #else #define D(x...) do { } while (0) #define RR(x...) do { } while (0) #define RAW(x...) do { } while (0) #define RAW_HDR(x...) do { } while (0) #define NTFY(x...) do { } while (0) #endif #define IPC_ROUTER_LOG_EVENT_ERROR 0x00 #define IPC_ROUTER_LOG_EVENT_TX 0x01 #define IPC_ROUTER_LOG_EVENT_RX 0x02 #define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF static LIST_HEAD(control_ports); static DECLARE_RWSEM(control_ports_lock_lha5); #define LP_HASH_SIZE 32 static struct list_head local_ports[LP_HASH_SIZE]; static DECLARE_RWSEM(local_ports_lock_lha2); /* Server info is organized as a hash table. The server's service ID is * used to index into the hash table. The instance ID of most of the servers * are 1 or 2. The service IDs are well distributed compared to the instance * IDs and hence choosing service ID to index into this hash table optimizes * the hash table operations like add, lookup, destroy. 
*/ #define SRV_HASH_SIZE 32 static struct list_head server_list[SRV_HASH_SIZE]; static DECLARE_RWSEM(server_list_lock_lha2); struct msm_ipc_server { struct list_head list; struct msm_ipc_port_name name; char pdev_name[32]; int next_pdev_id; int synced_sec_rule; struct list_head server_port_list; }; struct msm_ipc_server_port { struct list_head list; struct platform_device pdev; struct msm_ipc_port_addr server_addr; struct msm_ipc_router_xprt_info *xprt_info; }; struct msm_ipc_resume_tx_port { struct list_head list; uint32_t port_id; uint32_t node_id; }; #define RP_HASH_SIZE 32 struct msm_ipc_router_remote_port { struct list_head list; uint32_t node_id; uint32_t port_id; uint32_t tx_quota_cnt; struct mutex quota_lock_lhb2; struct list_head resume_tx_port_list; void *sec_rule; struct msm_ipc_server *server; }; struct msm_ipc_router_xprt_info { struct list_head list; struct msm_ipc_router_xprt *xprt; uint32_t remote_node_id; uint32_t initialized; struct list_head pkt_list; struct wakeup_source ws; struct mutex rx_lock_lhb2; struct mutex tx_lock_lhb2; uint32_t need_len; uint32_t abort_data_read; struct work_struct read_data; struct workqueue_struct *workqueue; }; #define RT_HASH_SIZE 4 struct msm_ipc_routing_table_entry { struct list_head list; uint32_t node_id; uint32_t neighbor_node_id; struct list_head remote_port_list[RP_HASH_SIZE]; struct msm_ipc_router_xprt_info *xprt_info; struct rw_semaphore lock_lha4; unsigned long num_tx_bytes; unsigned long num_rx_bytes; }; static struct list_head routing_table[RT_HASH_SIZE]; static DECLARE_RWSEM(routing_table_lock_lha3); static int routing_table_inited; static void do_read_data(struct work_struct *work); static LIST_HEAD(xprt_info_list); static DECLARE_RWSEM(xprt_info_list_lock_lha5); static DECLARE_COMPLETION(msm_ipc_local_router_up); #define IPC_ROUTER_INIT_TIMEOUT (10 * HZ) static uint32_t next_port_id; static DEFINE_MUTEX(next_port_id_lock_lha1); static struct workqueue_struct *msm_ipc_router_workqueue; enum { DOWN, UP, 
}; static void init_routing_table(void) { int i; for (i = 0; i < RT_HASH_SIZE; i++) INIT_LIST_HEAD(&routing_table[i]); } static struct msm_ipc_routing_table_entry *alloc_routing_table_entry( uint32_t node_id) { int i; struct msm_ipc_routing_table_entry *rt_entry; rt_entry = kmalloc(sizeof(struct msm_ipc_routing_table_entry), GFP_KERNEL); if (!rt_entry) { IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n", __func__, node_id); return NULL; } for (i = 0; i < RP_HASH_SIZE; i++) INIT_LIST_HEAD(&rt_entry->remote_port_list[i]); init_rwsem(&rt_entry->lock_lha4); rt_entry->node_id = node_id; rt_entry->xprt_info = NULL; return rt_entry; } /* Must be called with routing_table_lock_lha3 locked. */ static int add_routing_table_entry( struct msm_ipc_routing_table_entry *rt_entry) { uint32_t key; if (!rt_entry) return -EINVAL; key = (rt_entry->node_id % RT_HASH_SIZE); list_add_tail(&rt_entry->list, &routing_table[key]); return 0; } /* Must be called with routing_table_lock_lha3 locked. */ static struct msm_ipc_routing_table_entry *lookup_routing_table( uint32_t node_id) { uint32_t key = (node_id % RT_HASH_SIZE); struct msm_ipc_routing_table_entry *rt_entry; list_for_each_entry(rt_entry, &routing_table[key], list) { if (rt_entry->node_id == node_id) return rt_entry; } return NULL; } struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info) { struct rr_packet *temp_pkt; if (!xprt_info) return NULL; mutex_lock(&xprt_info->rx_lock_lhb2); if (xprt_info->abort_data_read) { mutex_unlock(&xprt_info->rx_lock_lhb2); IPC_RTR_ERR("%s detected SSR & exiting now\n", xprt_info->xprt->name); return NULL; } if (list_empty(&xprt_info->pkt_list)) { mutex_unlock(&xprt_info->rx_lock_lhb2); return NULL; } temp_pkt = list_first_entry(&xprt_info->pkt_list, struct rr_packet, list); list_del(&temp_pkt->list); if (list_empty(&xprt_info->pkt_list)) __pm_relax(&xprt_info->ws); mutex_unlock(&xprt_info->rx_lock_lhb2); return temp_pkt; } struct rr_packet *clone_pkt(struct rr_packet *pkt) { struct 
rr_packet *cloned_pkt; struct sk_buff *temp_skb, *cloned_skb; struct sk_buff_head *pkt_fragment_q; cloned_pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL); if (!cloned_pkt) { IPC_RTR_ERR("%s: failure\n", __func__); return NULL; } memcpy(&(cloned_pkt->hdr), &(pkt->hdr), sizeof(struct rr_header_v1)); /* TODO: Copy optional headers, if available */ pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL); if (!pkt_fragment_q) { IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__); kfree(cloned_pkt); return NULL; } skb_queue_head_init(pkt_fragment_q); skb_queue_walk(pkt->pkt_fragment_q, temp_skb) { cloned_skb = skb_clone(temp_skb, GFP_KERNEL); if (!cloned_skb) goto fail_clone; skb_queue_tail(pkt_fragment_q, cloned_skb); } cloned_pkt->pkt_fragment_q = pkt_fragment_q; cloned_pkt->length = pkt->length; return cloned_pkt; fail_clone: while (!skb_queue_empty(pkt_fragment_q)) { temp_skb = skb_dequeue(pkt_fragment_q); kfree_skb(temp_skb); } kfree(pkt_fragment_q); /* TODO: Free optional headers, if present */ kfree(cloned_pkt); return NULL; } /** * create_pkt() - Create a Router packet * @data: SKB queue to be contained inside the packet. * * @return: pointer to packet on success, NULL on failure. 
*/ struct rr_packet *create_pkt(struct sk_buff_head *data) { struct rr_packet *pkt; struct sk_buff *temp_skb; pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL); if (!pkt) { IPC_RTR_ERR("%s: failure\n", __func__); return NULL; } if (data) { pkt->pkt_fragment_q = data; skb_queue_walk(pkt->pkt_fragment_q, temp_skb) pkt->length += temp_skb->len; } else { pkt->pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL); if (!pkt->pkt_fragment_q) { IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n", __func__); kfree(pkt); return NULL; } skb_queue_head_init(pkt->pkt_fragment_q); } return pkt; } void release_pkt(struct rr_packet *pkt) { struct sk_buff *temp_skb; if (!pkt) return; if (!pkt->pkt_fragment_q) { kfree(pkt); return; } while (!skb_queue_empty(pkt->pkt_fragment_q)) { temp_skb = skb_dequeue(pkt->pkt_fragment_q); kfree_skb(temp_skb); } kfree(pkt->pkt_fragment_q); /* TODO: Free Optional headers, if present */ kfree(pkt); return; } static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf, unsigned int buf_len) { struct sk_buff_head *skb_head; struct sk_buff *skb; int first = 1, offset = 0; int skb_size, data_size; void *data; int last = 1; int align_size; skb_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL); if (!skb_head) { IPC_RTR_ERR("%s: Couldnot allocate skb_head\n", __func__); return NULL; } skb_queue_head_init(skb_head); data_size = buf_len; align_size = ALIGN_SIZE(data_size); while (offset != buf_len) { skb_size = data_size; if (first) skb_size += IPC_ROUTER_HDR_SIZE; if (last) skb_size += align_size; skb = alloc_skb(skb_size, GFP_KERNEL); if (!skb) { if (skb_size <= (PAGE_SIZE/2)) { IPC_RTR_ERR("%s: cannot allocate skb\n", __func__); goto buf_to_skb_error; } data_size = data_size / 2; last = 0; continue; } if (first) { skb_reserve(skb, IPC_ROUTER_HDR_SIZE); first = 0; } data = skb_put(skb, data_size); memcpy(skb->data, buf + offset, data_size); skb_queue_tail(skb_head, skb); offset += data_size; data_size = buf_len - offset; last = 1; } 
return skb_head; buf_to_skb_error: while (!skb_queue_empty(skb_head)) { skb = skb_dequeue(skb_head); kfree_skb(skb); } kfree(skb_head); return NULL; } static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head, unsigned int len) { struct sk_buff *temp; unsigned int offset = 0, buf_len = 0, copy_len; void *buf; if (!skb_head) { IPC_RTR_ERR("%s: NULL skb_head\n", __func__); return NULL; } temp = skb_peek(skb_head); buf_len = len; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) { IPC_RTR_ERR("%s: cannot allocate buf\n", __func__); return NULL; } skb_queue_walk(skb_head, temp) { copy_len = buf_len < temp->len ? buf_len : temp->len; memcpy(buf + offset, temp->data, copy_len); offset += copy_len; buf_len -= copy_len; } return buf; } void msm_ipc_router_free_skb(struct sk_buff_head *skb_head) { struct sk_buff *temp_skb; if (!skb_head) return; while (!skb_queue_empty(skb_head)) { temp_skb = skb_dequeue(skb_head); kfree_skb(temp_skb); } kfree(skb_head); } /** * extract_header_v1() - Extract IPC Router header of version 1 * @pkt: Packet structure into which the header has to be extraced. * @skb: SKB from which the header has to be extracted. * * @return: 0 on success, standard Linux error codes on failure. */ static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb) { if (!pkt || !skb) { IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__); return -EINVAL; } memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1)); skb_pull(skb, sizeof(struct rr_header_v1)); pkt->length -= sizeof(struct rr_header_v1); return 0; } /** * extract_header_v2() - Extract IPC Router header of version 2 * @pkt: Packet structure into which the header has to be extraced. * @skb: SKB from which the header has to be extracted. * * @return: 0 on success, standard Linux error codes on failure. 
*/ static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb) { struct rr_header_v2 *hdr; if (!pkt || !skb) { IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__); return -EINVAL; } hdr = (struct rr_header_v2 *)skb->data; pkt->hdr.version = (uint32_t)hdr->version; pkt->hdr.type = (uint32_t)hdr->type; pkt->hdr.src_node_id = (uint32_t)hdr->src_node_id; pkt->hdr.src_port_id = (uint32_t)hdr->src_port_id; pkt->hdr.size = (uint32_t)hdr->size; pkt->hdr.control_flag = (uint32_t)hdr->control_flag; pkt->hdr.dst_node_id = (uint32_t)hdr->dst_node_id; pkt->hdr.dst_port_id = (uint32_t)hdr->dst_port_id; skb_pull(skb, sizeof(struct rr_header_v2)); pkt->length -= sizeof(struct rr_header_v2); return 0; } /** * extract_header() - Extract IPC Router header * @pkt: Packet from which the header has to be extraced. * * @return: 0 on success, standard Linux error codes on failure. * * This function will check if the header version is v1 or v2 and invoke * the corresponding helper function to extract the IPC Router header. */ static int extract_header(struct rr_packet *pkt) { struct sk_buff *temp_skb; int ret; if (!pkt) { IPC_RTR_ERR("%s: NULL PKT\n", __func__); return -EINVAL; } temp_skb = skb_peek(pkt->pkt_fragment_q); if (!temp_skb || !temp_skb->data) { IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__); return -EINVAL; } if (temp_skb->data[0] == IPC_ROUTER_V1) { ret = extract_header_v1(pkt, temp_skb); } else if (temp_skb->data[0] == IPC_ROUTER_V2) { ret = extract_header_v2(pkt, temp_skb); /* TODO: Extract optional headers if present */ } else { IPC_RTR_ERR("%s: Invalid Header version %02x\n", __func__, temp_skb->data[0]); print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS, 16, 1, temp_skb->data, pkt->length, true); return -EINVAL; } return ret; } /** * calc_tx_header_size() - Calculate header size to be reserved in SKB * @pkt: Packet in which the space for header has to be reserved. * @dst_xprt_info: XPRT through which the destination is reachable. 
* * @return: required header size on success, * starndard Linux error codes on failure. * * This function is used to calculate the header size that has to be reserved * in a transmit SKB. The header size is calculated based on the XPRT through * which the destination node is reachable. */ static int calc_tx_header_size(struct rr_packet *pkt, struct msm_ipc_router_xprt_info *dst_xprt_info) { int hdr_size = 0; int xprt_version = 0; struct msm_ipc_routing_table_entry *rt_entry; struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info; if (!pkt) { IPC_RTR_ERR("%s: NULL PKT\n", __func__); return -EINVAL; } if (!xprt_info) { rt_entry = lookup_routing_table(pkt->hdr.dst_node_id); if (!rt_entry || !(rt_entry->xprt_info)) { IPC_RTR_ERR("%s: Node %d is not up\n", __func__, pkt->hdr.dst_node_id); return -ENODEV; } xprt_info = rt_entry->xprt_info; } if (xprt_info) xprt_version = xprt_info->xprt->get_version(xprt_info->xprt); if (xprt_version == IPC_ROUTER_V1) { pkt->hdr.version = IPC_ROUTER_V1; hdr_size = sizeof(struct rr_header_v1); } else if (xprt_version == IPC_ROUTER_V2) { pkt->hdr.version = IPC_ROUTER_V2; hdr_size = sizeof(struct rr_header_v2); /* TODO: Calculate optional header length, if present */ } else { IPC_RTR_ERR("%s: Invalid xprt_version %d\n", __func__, xprt_version); hdr_size = -EINVAL; } return hdr_size; } /** * calc_rx_header_size() - Calculate the RX header size * @xprt_info: XPRT info of the received message. * * @return: valid header size on success, INT_MAX on failure. 
 */
static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
{
	int xprt_version = 0;
	/* INT_MAX is the documented failure value, not an error code. */
	int hdr_size = INT_MAX;

	if (xprt_info)
		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);

	if (xprt_version == IPC_ROUTER_V1)
		hdr_size = sizeof(struct rr_header_v1);
	else if (xprt_version == IPC_ROUTER_V2)
		hdr_size = sizeof(struct rr_header_v2);

	return hdr_size;
}

/**
 * prepend_header_v1() - Prepend IPC Router header of version 1
 * @pkt: Packet structure which contains the header info to be prepended.
 * @hdr_size: Size of the header
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
{
	struct sk_buff *temp_skb;
	struct rr_header_v1 *hdr;

	if (!pkt || hdr_size <= 0) {
		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	/*
	 * If the first fragment has no headroom for the header, allocate a
	 * fresh SKB that will carry only the header and be queued in front.
	 */
	if (skb_headroom(temp_skb) < hdr_size) {
		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
		if (!temp_skb) {
			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
				__func__, hdr_size);
			return -ENOMEM;
		}
		skb_reserve(temp_skb, hdr_size);
	}

	/* The v1 wire format matches the in-memory header; copy verbatim. */
	hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
	memcpy(hdr, &pkt->hdr, hdr_size);
	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
	pkt->length += hdr_size;
	return 0;
}

/**
 * prepend_header_v2() - Prepend IPC Router header of version 2
 * @pkt: Packet structure which contains the header info to be prepended.
 * @hdr_size: Size of the header
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
{
	struct sk_buff *temp_skb;
	struct rr_header_v2 *hdr;

	if (!pkt || hdr_size <= 0) {
		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	/* No headroom in the first fragment: prepend a header-only SKB. */
	if (skb_headroom(temp_skb) < hdr_size) {
		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
		if (!temp_skb) {
			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
				__func__, hdr_size);
			return -ENOMEM;
		}
		skb_reserve(temp_skb, hdr_size);
	}

	/* Narrow the generic in-memory header into the packed v2 layout. */
	hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
	hdr->version = (uint8_t)pkt->hdr.version;
	hdr->type = (uint8_t)pkt->hdr.type;
	hdr->control_flag = (uint16_t)pkt->hdr.control_flag;
	hdr->size = (uint32_t)pkt->hdr.size;
	hdr->src_node_id = (uint16_t)pkt->hdr.src_node_id;
	hdr->src_port_id = (uint16_t)pkt->hdr.src_port_id;
	hdr->dst_node_id = (uint16_t)pkt->hdr.dst_node_id;
	hdr->dst_port_id = (uint16_t)pkt->hdr.dst_port_id;
	/* TODO: Add optional headers, if present */

	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
	pkt->length += hdr_size;
	return 0;
}

/**
 * prepend_header() - Prepend IPC Router header
 * @pkt: Packet structure which contains the header info to be prepended.
 * @xprt_info: XPRT through which the packet is transmitted.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * This function prepends the header to the packet to be transmitted. The
 * IPC Router header version to be prepended depends on the XPRT through
 * which the destination is reachable.
 */
static int prepend_header(struct rr_packet *pkt,
			  struct msm_ipc_router_xprt_info *xprt_info)
{
	int hdr_size;
	struct sk_buff *temp_skb;

	if (!pkt) {
		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	/* Side effect: also records the chosen version in pkt->hdr.version. */
	hdr_size = calc_tx_header_size(pkt, xprt_info);
	if (hdr_size <= 0)
		return hdr_size;

	if (pkt->hdr.version == IPC_ROUTER_V1)
		return prepend_header_v1(pkt, hdr_size);
	else if (pkt->hdr.version == IPC_ROUTER_V2)
		return prepend_header_v2(pkt, hdr_size);
	else
		return -EINVAL;
}

/**
 * defragment_pkt() - Defragment and linearize the packet
 * @pkt: Packet to be linearized.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * Some packets contain fragments of data over multiple SKBs. If an XPRT
 * does not support fragmented writes, linearize multiple SKBs into one
 * single SKB.
 */
static int defragment_pkt(struct rr_packet *pkt)
{
	struct sk_buff *dst_skb, *src_skb, *temp_skb;
	int offset = 0, buf_len = 0, copy_len;
	void *buf;
	int align_size;

	if (!pkt || pkt->length <= 0) {
		IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
		return -EINVAL;
	}

	/* Already a single SKB - nothing to linearize. */
	if (skb_queue_len(pkt->pkt_fragment_q) == 1)
		return 0;

	/* Over-allocate by the alignment padding the XPRT may require. */
	align_size = ALIGN_SIZE(pkt->length);
	dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
	if (!dst_skb) {
		IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
			    __func__, pkt->length);
		return -ENOMEM;
	}
	buf = skb_put(dst_skb, pkt->length);
	buf_len = pkt->length;

	/* Copy each fragment, in queue order, into the destination SKB. */
	skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
		copy_len = buf_len < src_skb->len ?
				buf_len : src_skb->len;
		memcpy(buf + offset, src_skb->data, copy_len);
		offset += copy_len;
		buf_len -= copy_len;
	}

	/* Release the fragments and leave only the linearized SKB queued. */
	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
		kfree_skb(temp_skb);
	}
	skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
	return 0;
}

/*
 * post_pkt_to_port() - Queue a packet on a local port's RX queue
 * @port_ptr: Destination local port.
 * @pkt: Packet to deliver.
 * @clone: Non-zero if the caller keeps ownership of @pkt and a clone
 *         must be queued instead (e.g. multi-port delivery).
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
			    struct rr_packet *pkt, int clone)
{
	struct rr_packet *temp_pkt = pkt;
	void (*notify)(unsigned event, void *oob_data,
		       size_t oob_data_len, void *priv);

	if (unlikely(!port_ptr || !pkt))
		return -EINVAL;

	if (clone) {
		temp_pkt = clone_pkt(pkt);
		if (!temp_pkt) {
			IPC_RTR_ERR(
			"%s: Error cloning packet for port %08x:%08x\n",
				__func__, port_ptr->this_port.node_id,
				port_ptr->this_port.port_id);
			return -ENOMEM;
		}
	}

	mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	/* Keep the system awake until the queued packet is consumed. */
	__pm_stay_awake(&port_ptr->port_rx_ws);
	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
	wake_up(&port_ptr->port_rx_wait_q);
	/* Snapshot the callback under the lock, invoke it outside. */
	notify = port_ptr->notify;
	mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);
	if (notify)
		notify(pkt->hdr.type, NULL, 0, port_ptr->priv);
	return 0;
}

/**
 * ipc_router_peek_pkt_size() - Peek into the packet header to get potential packet size
 * @data: Starting address of the packet which points to router header.
 *
 * @returns: potential packet size on success, < 0 on error.
 *
 * This function is used by the underlying transport abstraction layer to
 * peek into the potential packet size of an incoming packet.
 * This information
 * is used to perform link layer fragmentation and re-assembly
 */
int ipc_router_peek_pkt_size(char *data)
{
	int size;

	if (!data) {
		pr_err("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	/* FUTURE: Calculate optional header len in V2 header*/
	/* Total on-wire size = payload size field + fixed header size. */
	if (data[0] == IPC_ROUTER_V1)
		size = ((struct rr_header_v1 *)data)->size +
			sizeof(struct rr_header_v1);
	else if (data[0] == IPC_ROUTER_V2)
		size = ((struct rr_header_v2 *)data)->size +
			sizeof(struct rr_header_v2);
	else
		return -EINVAL;

	size += ALIGN_SIZE(size);
	return size;
}

/* Deliver the packet (cloned per port) to every registered control port. */
static int post_control_ports(struct rr_packet *pkt)
{
	struct msm_ipc_port *port_ptr;

	if (!pkt)
		return -EINVAL;

	down_read(&control_ports_lock_lha5);
	list_for_each_entry(port_ptr, &control_ports, list)
		post_pkt_to_port(port_ptr, pkt, 1);
	up_read(&control_ports_lock_lha5);
	return 0;
}

/*
 * allocate_port_id() - Allocate the next free local port ID
 *
 * @return: a non-zero free port ID, or 0 if the whole ID space is in use.
 *
 * Scans at most one full wrap of the ID space starting after the last
 * allocated ID. Note: 'port_id' doubles as an "in use" flag inside the
 * loop - it is non-zero after the inner hash-bucket walk only when the
 * candidate ID was found to be taken.
 */
static uint32_t allocate_port_id(void)
{
	uint32_t port_id = 0, prev_port_id, key;
	struct msm_ipc_port *port_ptr;

	mutex_lock(&next_port_id_lock_lha1);
	prev_port_id = next_port_id;
	down_read(&local_ports_lock_lha2);
	do {
		next_port_id++;
		/* IDs matching the router address pattern are reserved. */
		if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
			next_port_id = 1;

		key = (next_port_id & (LP_HASH_SIZE - 1));
		/* Empty bucket: the candidate ID is trivially free. */
		if (list_empty(&local_ports[key])) {
			port_id = next_port_id;
			break;
		}
		list_for_each_entry(port_ptr, &local_ports[key], list) {
			if (port_ptr->this_port.port_id == next_port_id) {
				port_id = next_port_id;
				break;
			}
		}
		/* port_id still 0 here means the candidate was not taken. */
		if (!port_id) {
			port_id = next_port_id;
			break;
		}
		port_id = 0;
	} while (next_port_id != prev_port_id);
	up_read(&local_ports_lock_lha2);
	mutex_unlock(&next_port_id_lock_lha1);

	return port_id;
}

/* Insert a local port into its hash bucket, keyed by port ID. */
void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
{
	uint32_t key;

	if (!port_ptr)
		return;

	key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
	down_write(&local_ports_lock_lha2);
	list_add_tail(&port_ptr->list, &local_ports[key]);
	up_write(&local_ports_lock_lha2);
}

/**
 * msm_ipc_router_create_raw_port() - Create an IPC Router port
 * @endpoint: User-space space socket information to be cached.
 * @notify: Function to notify incoming events on the port. The callback
 *          is invoked as notify(event, oob_data, oob_data_len, priv).
 * @priv: Private data to be passed during the event notification.
 *
 * @return: Valid pointer to port on success, NULL on failure.
 *
 * This function is used to create an IPC Router port. The port is used for
 * communication locally or outside the subsystem.
 */
struct msm_ipc_port *msm_ipc_router_create_raw_port(void *endpoint,
	void (*notify)(unsigned event, void *oob_data,
		       size_t oob_data_len, void *priv),
	void *priv)
{
	struct msm_ipc_port *port_ptr;

	port_ptr = kzalloc(sizeof(struct msm_ipc_port), GFP_KERNEL);
	if (!port_ptr)
		return NULL;

	port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
	port_ptr->this_port.port_id = allocate_port_id();
	if (!port_ptr->this_port.port_id) {
		IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
		kfree(port_ptr);
		return NULL;
	}

	mutex_init(&port_ptr->port_lock_lhb1);
	INIT_LIST_HEAD(&port_ptr->port_rx_q);
	mutex_init(&port_ptr->port_rx_q_lock_lhb3);
	init_waitqueue_head(&port_ptr->port_rx_wait_q);
	/* Name the RX wakeup source after the port ID and creating task. */
	snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
		 "ipc%08x_%s",
		 port_ptr->this_port.port_id,
		 current->comm);
	wakeup_source_init(&port_ptr->port_rx_ws, port_ptr->rx_ws_name);

	port_ptr->endpoint = endpoint;
	port_ptr->notify = notify;
	port_ptr->priv = priv;

	msm_ipc_router_add_local_port(port_ptr);
	return port_ptr;
}

/* Must be called with local_ports_lock_lha2 locked. */
static struct msm_ipc_port *msm_ipc_router_lookup_local_port(uint32_t port_id)
{
	int key = (port_id & (LP_HASH_SIZE - 1));
	struct msm_ipc_port *port_ptr;

	list_for_each_entry(port_ptr, &local_ports[key], list) {
		if (port_ptr->this_port.port_id == port_id)
			return port_ptr;
	}
	return NULL;
}

/* Must be called with routing_table_lock_lha3 locked.
 */
static struct msm_ipc_router_remote_port *msm_ipc_router_lookup_remote_port(
		uint32_t node_id, uint32_t port_id)
{
	struct msm_ipc_router_remote_port *rport_ptr;
	struct msm_ipc_routing_table_entry *rt_entry;
	int key = (port_id & (RP_HASH_SIZE - 1));

	rt_entry = lookup_routing_table(node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node is not up\n", __func__);
		return NULL;
	}

	down_read(&rt_entry->lock_lha4);
	list_for_each_entry(rport_ptr,
			    &rt_entry->remote_port_list[key], list) {
		if (rport_ptr->port_id == port_id) {
			up_read(&rt_entry->lock_lha4);
			return rport_ptr;
		}
	}
	up_read(&rt_entry->lock_lha4);
	return NULL;
}

/* Must be called with routing_table_lock_lha3 locked. */
static struct msm_ipc_router_remote_port *msm_ipc_router_create_remote_port(
		uint32_t node_id, uint32_t port_id)
{
	struct msm_ipc_router_remote_port *rport_ptr;
	struct msm_ipc_routing_table_entry *rt_entry;
	int key = (port_id & (RP_HASH_SIZE - 1));

	rt_entry = lookup_routing_table(node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node is not up\n", __func__);
		return NULL;
	}

	rport_ptr = kmalloc(sizeof(struct msm_ipc_router_remote_port),
			    GFP_KERNEL);
	if (!rport_ptr) {
		IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
		return NULL;
	}
	/* kmalloc'd memory: every field must be initialized explicitly. */
	rport_ptr->port_id = port_id;
	rport_ptr->node_id = node_id;
	rport_ptr->sec_rule = NULL;
	rport_ptr->server = NULL;
	rport_ptr->tx_quota_cnt = 0;
	mutex_init(&rport_ptr->quota_lock_lhb2);
	INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
	down_write(&rt_entry->lock_lha4);
	list_add_tail(&rport_ptr->list, &rt_entry->remote_port_list[key]);
	up_write(&rt_entry->lock_lha4);
	return rport_ptr;
}

/**
 * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
 * @rport_ptr: Pointer to the remote port.
 *
 * This function deletes all the resume_tx ports associated with a remote port
 * and frees the memory allocated to each resume_tx port.
 *
 * Must be called with rport_ptr->quota_lock_lhb2 locked.
 */
static void msm_ipc_router_free_resume_tx_port(
	struct msm_ipc_router_remote_port *rport_ptr)
{
	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;

	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
				 &rport_ptr->resume_tx_port_list, list) {
		list_del(&rtx_port->list);
		kfree(rtx_port);
	}
}

/**
 * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
 * @rport_ptr: Remote port whose resume_tx port list needs to be looked.
 * @port_id: Port ID which needs to be looked from the list.
 *
 * return 1 if the port_id is found in the list, else 0.
 *
 * This function is used to lookup the existence of a local port in
 * remote port's resume_tx list. This function is used to ensure that
 * the same port is not added to the remote_port's resume_tx list repeatedly.
 *
 * Must be called with rport_ptr->quota_lock_lhb2 locked.
 */
static int msm_ipc_router_lookup_resume_tx_port(
	struct msm_ipc_router_remote_port *rport_ptr, uint32_t port_id)
{
	struct msm_ipc_resume_tx_port *rtx_port;

	list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
		if (port_id == rtx_port->port_id)
			return 1;
	}
	return 0;
}

/**
 * post_resume_tx() - Post the resume_tx event
 * @rport_ptr: Pointer to the remote port
 * @pkt : The data packet that is received on a resume_tx event
 * @msg: Out of band data to be passed to kernel drivers
 *
 * This function informs about the reception of the resume_tx message from a
 * remote port pointed by rport_ptr to all the local ports that are in the
 * resume_tx_ports_list of this remote port. On posting the information, this
 * function sequentially deletes each entry in the resume_tx_port_list of the
 * remote port.
 *
 * Must be called with rport_ptr->quota_lock_lhb2 locked.
 */
static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
			   struct rr_packet *pkt, union rr_control_msg *msg)
{
	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
	struct msm_ipc_port *local_port;

	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
				 &rport_ptr->resume_tx_port_list, list) {
		local_port =
			msm_ipc_router_lookup_local_port(rtx_port->port_id);
		if (local_port && local_port->notify)
			/* Kernel client: deliver via its callback. */
			local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
					   sizeof(*msg), local_port->priv);
		else if (local_port)
			/* User-space client: queue the packet on the port. */
			post_pkt_to_port(local_port, pkt, 1);
		else
			IPC_RTR_ERR("%s: Local Port %d not Found",
				    __func__, rtx_port->port_id);
		list_del(&rtx_port->list);
		kfree(rtx_port);
	}
}

/* Must be called with routing_table_lock_lha3 locked. */
static void msm_ipc_router_destroy_remote_port(
	struct msm_ipc_router_remote_port *rport_ptr)
{
	uint32_t node_id;
	struct msm_ipc_routing_table_entry *rt_entry;

	if (!rport_ptr)
		return;

	node_id = rport_ptr->node_id;
	rt_entry = lookup_routing_table(node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
		return;
	}

	down_write(&rt_entry->lock_lha4);
	list_del(&rport_ptr->list);
	up_write(&rt_entry->lock_lha4);
	/* Release any local ports still waiting for resume_tx. */
	mutex_lock(&rport_ptr->quota_lock_lhb2);
	msm_ipc_router_free_resume_tx_port(rport_ptr);
	mutex_unlock(&rport_ptr->quota_lock_lhb2);
	kfree(rport_ptr);
	return;
}

/**
 * msm_ipc_router_lookup_server() - Lookup server information
 * @service: Service ID of the server info to be looked up.
 * @instance: Instance ID of the server info to be looked up.
 * @node_id: Node/Processor ID in which the server is hosted.
 * @port_id: Port ID within the node in which the server is hosted.
 *
 * @return: If found Pointer to server structure, else NULL.
 *
 * Note1: Lock the server_list_lock_lha2 before accessing this function.
 * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
 *        to <service:instance>. Used only when a client wants to send a
 *        message to any QMI server.
 */
static struct msm_ipc_server *msm_ipc_router_lookup_server(
				uint32_t service,
				uint32_t instance,
				uint32_t node_id,
				uint32_t port_id)
{
	struct msm_ipc_server *server;
	struct msm_ipc_server_port *server_port;
	int key = (service & (SRV_HASH_SIZE - 1));

	list_for_each_entry(server, &server_list[key], list) {
		if ((server->name.service != service) ||
		    (server->name.instance != instance))
			continue;
		/* <0:0> matches any hosting address of this service. */
		if ((node_id == 0) && (port_id == 0))
			return server;
		list_for_each_entry(server_port,
				    &server->server_port_list, list) {
			if ((server_port->server_addr.node_id == node_id) &&
			    (server_port->server_addr.port_id == port_id))
				return server;
		}
	}
	return NULL;
}

/*
 * No-op device release callback. The platform devices registered here are
 * embedded in server_port structures that are kfree'd explicitly when the
 * server port is destroyed, so nothing needs to happen on release.
 */
static void dummy_release(struct device *dev)
{
}

/**
 * msm_ipc_router_create_server() - Add server info to hash table
 * @service: Service ID of the server info to be created.
 * @instance: Instance ID of the server info to be created.
 * @node_id: Node/Processor ID in which the server is hosted.
 * @port_id: Port ID within the node in which the server is hosted.
 * @xprt_info: XPRT through which the node hosting the server is reached.
 *
 * @return: Pointer to server structure on success, else NULL.
 *
 * This function adds the server info to the hash table. If the same
 * server(i.e. <service_id:instance_id>) is hosted in different nodes,
 * they are maintained as list of "server_port" under "server" structure.
 * Note: Lock the server_list_lock_lha2 before accessing this function.
 */
static struct msm_ipc_server *msm_ipc_router_create_server(
					uint32_t service,
					uint32_t instance,
					uint32_t node_id,
					uint32_t port_id,
		struct msm_ipc_router_xprt_info *xprt_info)
{
	struct msm_ipc_server *server = NULL;
	struct msm_ipc_server_port *server_port;
	int key = (service & (SRV_HASH_SIZE - 1));

	/* Reuse an existing <service:instance> entry if one is hashed. */
	list_for_each_entry(server, &server_list[key], list) {
		if ((server->name.service == service) &&
		    (server->name.instance == instance))
			goto create_srv_port;
	}

	server = kzalloc(sizeof(struct msm_ipc_server), GFP_KERNEL);
	if (!server) {
		IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
		return NULL;
	}
	server->name.service = service;
	server->name.instance = instance;
	server->synced_sec_rule = 0;
	INIT_LIST_HEAD(&server->server_port_list);
	list_add_tail(&server->list, &server_list[key]);
	scnprintf(server->pdev_name, sizeof(server->pdev_name),
		  "QMI%08x:%08x", service, instance);
	server->next_pdev_id = 1;

create_srv_port:
	server_port = kzalloc(sizeof(struct msm_ipc_server_port), GFP_KERNEL);
	if (!server_port) {
		/* Undo the server allocation if this was its first port. */
		if (list_empty(&server->server_port_list)) {
			list_del(&server->list);
			kfree(server);
		}
		IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
		return NULL;
	}
	server_port->server_addr.node_id = node_id;
	server_port->server_addr.port_id = port_id;
	server_port->xprt_info = xprt_info;
	list_add_tail(&server_port->list, &server->server_port_list);

	/* Publish a platform device for this server port. */
	server_port->pdev.name = server->pdev_name;
	server_port->pdev.id = server->next_pdev_id++;
	server_port->pdev.dev.release = dummy_release;
	platform_device_register(&server_port->pdev);
	return server;
}

/**
 * msm_ipc_router_destroy_server() - Remove server info from hash table
 * @server: Server info to be removed.
 * @node_id: Node/Processor ID in which the server is hosted.
 * @port_id: Port ID within the node in which the server is hosted.
 *
 * This function removes the server_port identified using <node_id:port_id>
 * from the server structure.
If the server_port list under server structure * is empty after removal, then remove the server structure from the server * hash table. * Note: Lock the server_list_lock_lha2 before accessing this function. */ static void msm_ipc_router_destroy_server(struct msm_ipc_server *server, uint32_t node_id, uint32_t port_id) { struct msm_ipc_server_port *server_port; if (!server) return; list_for_each_entry(server_port, &server->server_port_list, list) { if ((server_port->server_addr.node_id == node_id) && (server_port->server_addr.port_id == port_id)) break; } if (server_port) { platform_device_unregister(&server_port->pdev); list_del(&server_port->list); kfree(server_port); } if (list_empty(&server->server_port_list)) { list_del(&server->list); kfree(server); } return; } static int msm_ipc_router_send_control_msg( struct msm_ipc_router_xprt_info *xprt_info, union rr_control_msg *msg, uint32_t dst_node_id) { struct rr_packet *pkt; struct sk_buff *ipc_rtr_pkt; struct rr_header_v1 *hdr; int pkt_size; void *data; struct sk_buff_head *pkt_fragment_q; int ret; if (!xprt_info || ((msg->cmd != IPC_ROUTER_CTRL_CMD_HELLO) && !xprt_info->initialized)) { IPC_RTR_ERR("%s: xprt_info not initialized\n", __func__); return -EINVAL; } if (xprt_info->remote_node_id == IPC_ROUTER_NID_LOCAL) return 0; pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL); if (!pkt) { IPC_RTR_ERR("%s: pkt alloc failed\n", __func__); return -ENOMEM; } pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL); if (!pkt_fragment_q) { IPC_RTR_ERR("%s: pkt_fragment_q alloc failed\n", __func__); kfree(pkt); return -ENOMEM; } skb_queue_head_init(pkt_fragment_q); pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg); ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL); if (!ipc_rtr_pkt) { IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__); kfree(pkt_fragment_q); kfree(pkt); return -ENOMEM; } skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE); data = skb_put(ipc_rtr_pkt, sizeof(*msg)); memcpy(data, msg, sizeof(*msg)); 
skb_queue_tail(pkt_fragment_q, ipc_rtr_pkt); pkt->pkt_fragment_q = pkt_fragment_q; pkt->length = sizeof(*msg); hdr = &(pkt->hdr); hdr->version = IPC_ROUTER_V1; hdr->type = msg->cmd; hdr->src_node_id = IPC_ROUTER_NID_LOCAL; hdr->src_port_id = IPC_ROUTER_ADDRESS; hdr->control_flag = 0; hdr->size = sizeof(*msg); if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) hdr->dst_node_id = dst_node_id; else hdr->dst_node_id = xprt_info->remote_node_id; hdr->dst_port_id = IPC_ROUTER_ADDRESS; mutex_lock(&xprt_info->tx_lock_lhb2); ret = prepend_header(pkt, xprt_info); if (ret < 0) { mutex_unlock(&xprt_info->tx_lock_lhb2); IPC_RTR_ERR("%s: Prepend Header failed\n", __func__); release_pkt(pkt); return ret; } ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt); mutex_unlock(&xprt_info->tx_lock_lhb2); release_pkt(pkt); return ret; } static int msm_ipc_router_send_server_list(uint32_t node_id, struct msm_ipc_router_xprt_info *xprt_info) { union rr_control_msg ctl; struct msm_ipc_server *server; struct msm_ipc_server_port *server_port; int i; if (!xprt_info || !xprt_info->initialized) { IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__); return -EINVAL; } memset(&ctl, 0, sizeof(ctl)); ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER; for (i = 0; i < SRV_HASH_SIZE; i++) { list_for_each_entry(server, &server_list[i], list) { ctl.srv.service = server->name.service; ctl.srv.instance = server->name.instance; list_for_each_entry(server_port, &server->server_port_list, list) { if (server_port->server_addr.node_id != node_id) continue; ctl.srv.node_id = server_port->server_addr.node_id; ctl.srv.port_id = server_port->server_addr.port_id; msm_ipc_router_send_control_msg(xprt_info, &ctl, IPC_ROUTER_DUMMY_DEST_NODE); } } } return 0; } #if defined(DEBUG) static char *type_to_str(int i) { switch (i) { case IPC_ROUTER_CTRL_CMD_DATA: return "data "; case IPC_ROUTER_CTRL_CMD_HELLO: return "hello "; case IPC_ROUTER_CTRL_CMD_BYE: return "bye "; case IPC_ROUTER_CTRL_CMD_NEW_SERVER: return 
"new_srvr"; case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER: return "rmv_srvr"; case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT: return "rmv_clnt"; case IPC_ROUTER_CTRL_CMD_RESUME_TX: return "resum_tx"; default: return "invalid"; } } #endif static int broadcast_ctl_msg_locally(union rr_control_msg *msg) { struct rr_packet *pkt; struct sk_buff *ipc_rtr_pkt; struct rr_header_v1 *hdr; int pkt_size; void *data; struct sk_buff_head *pkt_fragment_q; int ret; pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL); if (!pkt) { IPC_RTR_ERR("%s: pkt alloc failed\n", __func__); return -ENOMEM; } pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL); if (!pkt_fragment_q) { IPC_RTR_ERR("%s: pkt_fragment_q alloc failed\n", __func__); kfree(pkt); return -ENOMEM; } skb_queue_head_init(pkt_fragment_q); pkt_size = sizeof(*msg); ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL); if (!ipc_rtr_pkt) { IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__); kfree(pkt_fragment_q); kfree(pkt); return -ENOMEM; } data = skb_put(ipc_rtr_pkt, sizeof(*msg)); memcpy(data, msg, sizeof(*msg)); hdr = &(pkt->hdr); hdr->version = IPC_ROUTER_V1; hdr->type = msg->cmd; hdr->src_node_id = IPC_ROUTER_NID_LOCAL; hdr->src_port_id = IPC_ROUTER_ADDRESS; hdr->control_flag = 0; hdr->size = sizeof(*msg); hdr->dst_node_id = IPC_ROUTER_NID_LOCAL; hdr->dst_port_id = IPC_ROUTER_ADDRESS; skb_queue_tail(pkt_fragment_q, ipc_rtr_pkt); pkt->pkt_fragment_q = pkt_fragment_q; pkt->length = pkt_size; ret = post_control_ports(pkt); release_pkt(pkt); return ret; } static int broadcast_ctl_msg(union rr_control_msg *ctl) { struct msm_ipc_router_xprt_info *xprt_info; down_read(&xprt_info_list_lock_lha5); list_for_each_entry(xprt_info, &xprt_info_list, list) { msm_ipc_router_send_control_msg(xprt_info, ctl, IPC_ROUTER_DUMMY_DEST_NODE); } up_read(&xprt_info_list_lock_lha5); return 0; } static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info, union rr_control_msg *ctl) { struct msm_ipc_router_xprt_info *fwd_xprt_info; if (!xprt_info 
|| !ctl) return -EINVAL; down_read(&xprt_info_list_lock_lha5); list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) { if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id) msm_ipc_router_send_control_msg(fwd_xprt_info, ctl, IPC_ROUTER_DUMMY_DEST_NODE); } up_read(&xprt_info_list_lock_lha5); return 0; } static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info, struct rr_packet *pkt) { struct rr_header_v1 *hdr; struct msm_ipc_router_xprt_info *fwd_xprt_info; struct msm_ipc_routing_table_entry *rt_entry; int ret = 0; int fwd_xprt_option; if (!xprt_info || !pkt) return -EINVAL; hdr = &(pkt->hdr); down_read(&routing_table_lock_lha3); rt_entry = lookup_routing_table(hdr->dst_node_id); if (!(rt_entry) || !(rt_entry->xprt_info)) { IPC_RTR_ERR("%s: Routing table not initialized\n", __func__); ret = -ENODEV; goto fm_error1; } down_read(&rt_entry->lock_lha4); fwd_xprt_info = rt_entry->xprt_info; ret = prepend_header(pkt, fwd_xprt_info); if (ret < 0) { IPC_RTR_ERR("%s: Prepend Header failed\n", __func__); goto fm_error2; } fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt); if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) { ret = defragment_pkt(pkt); if (ret < 0) goto fm_error2; } mutex_lock(&fwd_xprt_info->tx_lock_lhb2); if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) { IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__); ret = -EINVAL; goto fm_error3; } if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) { IPC_RTR_ERR("%s: DST in the same cluster\n", __func__); ret = 0; goto fm_error3; } fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt); fm_error3: mutex_unlock(&fwd_xprt_info->tx_lock_lhb2); fm_error2: up_read(&rt_entry->lock_lha4); fm_error1: up_read(&routing_table_lock_lha3); return ret; } static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info, uint32_t node_id, uint32_t port_id) { union rr_control_msg msg; struct msm_ipc_router_xprt_info *tmp_xprt_info; int 
mode; void *xprt_info; int rc = 0; if (!mode_info) { IPC_RTR_ERR("%s: NULL mode_info\n", __func__); return -EINVAL; } mode = mode_info->mode; xprt_info = mode_info->xprt_info; memset(&msg, 0, sizeof(msg)); msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT; msg.cli.node_id = node_id; msg.cli.port_id = port_id; if ((mode == SINGLE_LINK_MODE) && xprt_info) { down_read(&xprt_info_list_lock_lha5); list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) { if (tmp_xprt_info != xprt_info) continue; msm_ipc_router_send_control_msg(tmp_xprt_info, &msg, IPC_ROUTER_DUMMY_DEST_NODE); break; } up_read(&xprt_info_list_lock_lha5); } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) { broadcast_ctl_msg_locally(&msg); } else if (mode == MULTI_LINK_MODE) { broadcast_ctl_msg(&msg); broadcast_ctl_msg_locally(&msg); } else if (mode != NULL_MODE) { IPC_RTR_ERR( "%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n", __func__, mode, xprt_info, node_id, port_id); rc = -EINVAL; } return rc; } static void update_comm_mode_info(struct comm_mode_info *mode_info, struct msm_ipc_router_xprt_info *xprt_info) { if (!mode_info) { IPC_RTR_ERR("%s: NULL mode_info\n", __func__); return; } if (mode_info->mode == NULL_MODE) { mode_info->xprt_info = xprt_info; mode_info->mode = SINGLE_LINK_MODE; } else if (mode_info->mode == SINGLE_LINK_MODE && mode_info->xprt_info != xprt_info) { mode_info->mode = MULTI_LINK_MODE; } return; } static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info, struct msm_ipc_router_remote_port *rport_ptr) { union rr_control_msg ctl; struct msm_ipc_server *server = rport_ptr->server; D("Remove server %08x:%08x - %08x:%08x", server->name.service, server->name.instance, rport_ptr->node_id, rport_ptr->port_id); memset(&ctl, 0, sizeof(ctl)); ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER; ctl.srv.service = server->name.service; ctl.srv.instance = server->name.instance; ctl.srv.node_id = rport_ptr->node_id; ctl.srv.port_id = rport_ptr->port_id; if (xprt_info) 
relay_ctl_msg(xprt_info, &ctl); broadcast_ctl_msg_locally(&ctl); msm_ipc_router_destroy_server(server, rport_ptr->node_id, rport_ptr->port_id); } static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info, struct msm_ipc_routing_table_entry *rt_entry) { struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr; union rr_control_msg ctl; int j; memset(&ctl, 0, sizeof(ctl)); for (j = 0; j < RP_HASH_SIZE; j++) { list_for_each_entry_safe(rport_ptr, tmp_rport_ptr, &rt_entry->remote_port_list[j], list) { list_del(&rport_ptr->list); mutex_lock(&rport_ptr->quota_lock_lhb2); msm_ipc_router_free_resume_tx_port(rport_ptr); mutex_unlock(&rport_ptr->quota_lock_lhb2); if (rport_ptr->server) cleanup_rmt_server(xprt_info, rport_ptr); ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT; ctl.cli.node_id = rport_ptr->node_id; ctl.cli.port_id = rport_ptr->port_id; relay_ctl_msg(xprt_info, &ctl); broadcast_ctl_msg_locally(&ctl); kfree(rport_ptr); } } } static void msm_ipc_cleanup_routing_table( struct msm_ipc_router_xprt_info *xprt_info) { int i; struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry; if (!xprt_info) { IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__); return; } down_write(&server_list_lock_lha2); down_write(&routing_table_lock_lha3); for (i = 0; i < RT_HASH_SIZE; i++) { list_for_each_entry_safe(rt_entry, tmp_rt_entry, &routing_table[i], list) { down_write(&rt_entry->lock_lha4); if (rt_entry->xprt_info != xprt_info) { up_write(&rt_entry->lock_lha4); continue; } cleanup_rmt_ports(xprt_info, rt_entry); rt_entry->xprt_info = NULL; up_write(&rt_entry->lock_lha4); list_del(&rt_entry->list); kfree(rt_entry); } } up_write(&routing_table_lock_lha3); up_write(&server_list_lock_lha2); } /** * sync_sec_rule() - Synchrnoize the security rule into the server structure * @server: Server structure where the rule has to be synchronized. * @rule: Security tule to be synchronized. 
 *
 * This function is used to update the server structure with the security
 * rule configured for the <service:instance> corresponding to that server.
 */
static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
{
	struct msm_ipc_server_port *server_port;
	struct msm_ipc_router_remote_port *rport_ptr = NULL;

	down_read(&routing_table_lock_lha3);
	/* Attach the rule to each remote port currently hosting the server. */
	list_for_each_entry(server_port, &server->server_port_list, list) {
		rport_ptr = msm_ipc_router_lookup_remote_port(
				server_port->server_addr.node_id,
				server_port->server_addr.port_id);
		if (!rport_ptr)
			continue;
		rport_ptr->sec_rule = rule;
	}
	up_read(&routing_table_lock_lha3);
	server->synced_sec_rule = 1;
}

/**
 * msm_ipc_sync_sec_rule() - Sync the security rule to the service
 * @service: Service for which the rule has to be synchronized.
 * @instance: Instance for which the rule has to be synchronized.
 * @rule: Security rule to be synchronized.
 *
 * This function is used to synchronize the security rule with the server
 * hash table, if the user-space script configures the rule after the service
 * has come up. This function is used to synchronize the security rule to a
 * specific service and optionally a specific instance.
 */
void msm_ipc_sync_sec_rule(uint32_t service, uint32_t instance, void *rule)
{
	int key = (service & (SRV_HASH_SIZE - 1));
	struct msm_ipc_server *server;

	down_write(&server_list_lock_lha2);
	list_for_each_entry(server, &server_list[key], list) {
		if (server->name.service != service)
			continue;

		if (server->name.instance != instance &&
		    instance != ALL_INSTANCE)
			continue;

		/* If the rule applies to all instances and if the specific
		 * instance of a service has a rule synchronized already,
		 * do not apply the rule for that specific instance.
		 */
		if (instance == ALL_INSTANCE && server->synced_sec_rule)
			continue;

		sync_sec_rule(server, rule);
	}
	up_write(&server_list_lock_lha2);
}

/**
 * msm_ipc_sync_default_sec_rule() - Default security rule to all services
 * @rule: Security rule to be synchronized.
*
 * This function is used to synchronize the security rule with the server
 * hash table, if the user-space script configures the rule after the service
 * has come up. This function is used to synchronize the security rule that
 * applies to all services, if the concerned service does not have any rule
 * defined.
 */
void msm_ipc_sync_default_sec_rule(void *rule)
{
	int key;
	struct msm_ipc_server *server;

	down_write(&server_list_lock_lha2);
	for (key = 0; key < SRV_HASH_SIZE; key++) {
		list_for_each_entry(server, &server_list[key], list) {
			/* Skip services that already have a specific rule. */
			if (server->synced_sec_rule)
				continue;

			sync_sec_rule(server, rule);
		}
	}
	up_write(&server_list_lock_lha2);
}

/*
 * process_hello_msg() - Handle a HELLO control message from a neighbor
 * @xprt_info: XPRT on which the HELLO arrived.
 * @hdr: Message header carrying the neighbor's source node id.
 *
 * Records the neighbor's node id against this XPRT, replies with a HELLO,
 * marks the XPRT initialized and forwards the list of known servers to the
 * new neighbor. Returns 0 on success or a negative errno.
 */
static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
			     struct rr_header_v1 *hdr)
{
	int i, rc = 0;
	union rr_control_msg ctl;
	struct msm_ipc_routing_table_entry *rt_entry;

	if (!hdr)
		return -EINVAL;
	RR("o HELLO NID %d\n", hdr->src_node_id);

	xprt_info->remote_node_id = hdr->src_node_id;
	/*
	 * Find the entry from Routing Table corresponding to Node ID.
	 * Under SSR, an entry will be found. When the system boots up
	 * for the 1st time, an entry will not be found and hence allocate
	 * an entry. Update the entry with the Node ID that it corresponds
	 * to and the XPRT through which it can be reached.
	 */
	down_write(&routing_table_lock_lha3);
	rt_entry = lookup_routing_table(hdr->src_node_id);
	if (!rt_entry) {
		rt_entry = alloc_routing_table_entry(hdr->src_node_id);
		if (!rt_entry) {
			up_write(&routing_table_lock_lha3);
			IPC_RTR_ERR("%s: rt_entry allocation failed\n",
				    __func__);
			return -ENOMEM;
		}
		add_routing_table_entry(rt_entry);
	}
	down_write(&rt_entry->lock_lha4);
	rt_entry->neighbor_node_id = xprt_info->remote_node_id;
	rt_entry->xprt_info = xprt_info;
	up_write(&rt_entry->lock_lha4);
	up_write(&routing_table_lock_lha3);

	/* Send a reply HELLO message */
	memset(&ctl, 0, sizeof(ctl));
	ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
	rc = msm_ipc_router_send_control_msg(xprt_info, &ctl,
					     IPC_ROUTER_DUMMY_DEST_NODE);
	if (rc < 0) {
		IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
			    __func__);
		return rc;
	}
	xprt_info->initialized = 1;

	/* Send list of servers from the local node and from nodes
	 * outside the mesh network in which this XPRT is part of.
	 */
	down_read(&server_list_lock_lha2);
	down_read(&routing_table_lock_lha3);
	for (i = 0; i < RT_HASH_SIZE; i++) {
		list_for_each_entry(rt_entry, &routing_table[i], list) {
			/* Skip remote entries already reachable over this
			 * same link (the neighbor knows those servers).
			 */
			if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
			    (!rt_entry->xprt_info ||
			     (rt_entry->xprt_info->xprt->link_id ==
			      xprt_info->xprt->link_id)))
				continue;
			rc = msm_ipc_router_send_server_list(rt_entry->node_id,
							     xprt_info);
			if (rc < 0) {
				up_read(&routing_table_lock_lha3);
				up_read(&server_list_lock_lha2);
				return rc;
			}
		}
	}
	up_read(&routing_table_lock_lha3);
	up_read(&server_list_lock_lha2);
	RR("HELLO message processed\n");
	return rc;
}

/*
 * process_resume_tx_msg() - Handle a RESUME_TX control message
 * @msg: Control message identifying the remote client port.
 * @pkt: Raw packet, forwarded to blocked local ports via post_resume_tx().
 *
 * Resets the remote port's tx quota and wakes up local ports that were
 * blocked waiting for Resume_Tx from it.
 */
static int process_resume_tx_msg(union rr_control_msg *msg,
				 struct rr_packet *pkt)
{
	struct msm_ipc_router_remote_port *rport_ptr;
	int ret = 0;

	RR("o RESUME_TX id=%d:%08x\n", msg->cli.node_id, msg->cli.port_id);

	down_read(&local_ports_lock_lha2);
	down_read(&routing_table_lock_lha3);
	rport_ptr = msm_ipc_router_lookup_remote_port(msg->cli.node_id,
						      msg->cli.port_id);
	if (!rport_ptr) {
		IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
		ret =
-ENODEV;
		goto prtm_out;
	}
	mutex_lock(&rport_ptr->quota_lock_lhb2);
	rport_ptr->tx_quota_cnt = 0;
	post_resume_tx(rport_ptr, pkt, msg);
	mutex_unlock(&rport_ptr->quota_lock_lhb2);
prtm_out:
	up_read(&routing_table_lock_lha3);
	up_read(&local_ports_lock_lha2);
	/* NOTE(review): 'ret' is set to -ENODEV on lookup failure but the
	 * function unconditionally returns 0 -- confirm whether callers
	 * should instead see that error.
	 */
	return 0;
}

/*
 * process_new_server_msg() - Handle a NEW_SERVER control message
 * @xprt_info: XPRT on which the message arrived.
 * @msg: Control message describing the <service:instance> and <node:port>.
 * @pkt: Raw packet, posted to local control ports on success.
 *
 * Registers a remote service in the local server database, creating the
 * routing-table entry and remote port for it if needed, caches the
 * security rule on the remote port, then relays the message to other
 * clusters and notifies local control ports.
 */
static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
			union rr_control_msg *msg, struct rr_packet *pkt)
{
	struct msm_ipc_routing_table_entry *rt_entry;
	struct msm_ipc_server *server;
	struct msm_ipc_router_remote_port *rport_ptr;

	if (msg->srv.instance == 0) {
		/* NOTE(review): the check rejects instance == 0 but the
		 * message text says "version = 0" -- looks like a stale
		 * string; verify before changing user-visible logs.
		 */
		IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
			    __func__, msg->srv.service);
		return -EINVAL;
	}

	RR("o NEW_SERVER id=%d:%08x service=%08x:%08x\n", msg->srv.node_id,
	    msg->srv.port_id, msg->srv.service, msg->srv.instance);
	/*
	 * Find the entry from Routing Table corresponding to Node ID.
	 * Under SSR, an entry will be found. When the subsystem hosting
	 * service is not adjacent, an entry will not be found and hence
	 * allocate an entry. Update the entry with the Node ID that it
	 * corresponds to and the XPRT through which it can be reached.
	 */
	down_write(&routing_table_lock_lha3);
	rt_entry = lookup_routing_table(msg->srv.node_id);
	if (!rt_entry) {
		rt_entry = alloc_routing_table_entry(msg->srv.node_id);
		if (!rt_entry) {
			up_write(&routing_table_lock_lha3);
			IPC_RTR_ERR("%s: rt_entry allocation failed\n",
				    __func__);
			return -ENOMEM;
		}
		down_write(&rt_entry->lock_lha4);
		rt_entry->neighbor_node_id = xprt_info->remote_node_id;
		rt_entry->xprt_info = xprt_info;
		up_write(&rt_entry->lock_lha4);
		add_routing_table_entry(rt_entry);
	}
	up_write(&routing_table_lock_lha3);

	/* If the service does not exist already in the database, create and
	 * store the service info. Create a remote port structure in which
	 * the service is hosted and cache the security rule for the service
	 * in that remote port structure.
	 */
	down_write(&server_list_lock_lha2);
	server = msm_ipc_router_lookup_server(msg->srv.service,
			msg->srv.instance, msg->srv.node_id, msg->srv.port_id);
	if (!server) {
		server = msm_ipc_router_create_server(
				msg->srv.service, msg->srv.instance,
				msg->srv.node_id, msg->srv.port_id, xprt_info);
		if (!server) {
			up_write(&server_list_lock_lha2);
			IPC_RTR_ERR("%s: Server Create failed\n", __func__);
			return -ENOMEM;
		}

		down_read(&routing_table_lock_lha3);
		if (!msm_ipc_router_lookup_remote_port(
				msg->srv.node_id, msg->srv.port_id)) {
			rport_ptr = msm_ipc_router_create_remote_port(
					msg->srv.node_id, msg->srv.port_id);
			if (!rport_ptr) {
				up_read(&routing_table_lock_lha3);
				up_write(&server_list_lock_lha2);
				return -ENOMEM;
			}
			rport_ptr->server = server;
			rport_ptr->sec_rule = msm_ipc_get_security_rule(
						msg->srv.service,
						msg->srv.instance);
		}
		up_read(&routing_table_lock_lha3);
	}
	up_write(&server_list_lock_lha2);

	/* Relay the new server message to other subsystems that do not belong
	 * to the cluster from which this message is received. Notify the
	 * local clients waiting for this service.
	 */
	relay_ctl_msg(xprt_info, msg);
	post_control_ports(pkt);
	return 0;
}

/*
 * process_rmv_server_msg() - Handle a REMOVE_SERVER control message
 * @xprt_info: XPRT on which the message arrived.
 * @msg: Control message identifying the server being removed.
 * @pkt: Raw packet, posted to local control ports if the server existed.
 *
 * Detaches the server from its remote port, destroys the server entry and
 * propagates the removal to other clusters and local clients.
 */
static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
			union rr_control_msg *msg, struct rr_packet *pkt)
{
	struct msm_ipc_server *server;
	struct msm_ipc_router_remote_port *rport_ptr;

	RR("o REMOVE_SERVER service=%08x:%d\n",
	    msg->srv.service, msg->srv.instance);
	down_write(&server_list_lock_lha2);
	server = msm_ipc_router_lookup_server(msg->srv.service,
			msg->srv.instance, msg->srv.node_id, msg->srv.port_id);

	down_write(&routing_table_lock_lha3);
	rport_ptr = msm_ipc_router_lookup_remote_port(msg->srv.node_id,
						      msg->srv.port_id);
	if (rport_ptr && rport_ptr->server == server)
		rport_ptr->server = NULL;
	up_write(&routing_table_lock_lha3);

	if (server) {
		msm_ipc_router_destroy_server(server, msg->srv.node_id,
					      msg->srv.port_id);
		/*
		 * Relay the new server message to other subsystems that do not
		 * belong to the cluster from which this message is received.
		 * Notify the local clients communicating with the service.
*/
		relay_ctl_msg(xprt_info, msg);
		post_control_ports(pkt);
	}
	up_write(&server_list_lock_lha2);
	return 0;
}

/*
 * process_rmv_client_msg() - Handle a REMOVE_CLIENT control message
 * @xprt_info: XPRT on which the message arrived.
 * @msg: Control message identifying the remote client port.
 * @pkt: Raw packet, posted to local control ports.
 *
 * Tears down the remote port (and any server it hosted), then relays the
 * message onward and notifies local control ports.
 */
static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
			union rr_control_msg *msg, struct rr_packet *pkt)
{
	struct msm_ipc_router_remote_port *rport_ptr;

	RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.node_id, msg->cli.port_id);
	down_write(&server_list_lock_lha2);
	down_write(&routing_table_lock_lha3);
	rport_ptr = msm_ipc_router_lookup_remote_port(msg->cli.node_id,
						      msg->cli.port_id);
	if (rport_ptr) {
		if (rport_ptr->server)
			cleanup_rmt_server(NULL, rport_ptr);
		msm_ipc_router_destroy_remote_port(rport_ptr);
	}
	up_write(&routing_table_lock_lha3);
	up_write(&server_list_lock_lha2);

	relay_ctl_msg(xprt_info, msg);
	post_control_ports(pkt);
	return 0;
}

/*
 * process_control_msg() - Dispatch a router-to-router control message
 * @xprt_info: XPRT on which the packet arrived.
 * @pkt: Packet whose payload must be exactly one rr_control_msg.
 *
 * Validates the packet length, linearizes the control message out of the
 * skb fragment queue and dispatches on the command code. The extracted
 * copy is freed here regardless of the handler's result.
 */
static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
			       struct rr_packet *pkt)
{
	union rr_control_msg *msg;
	int rc = 0;
	struct rr_header_v1 *hdr;

	if (pkt->length != sizeof(*msg)) {
		IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
			    pkt->length, sizeof(*msg));
		return -EINVAL;
	}

	hdr = &(pkt->hdr);
	msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
	if (!msg) {
		IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
		return -ENOMEM;
	}

	switch (msg->cmd) {
	case IPC_ROUTER_CTRL_CMD_HELLO:
		rc = process_hello_msg(xprt_info, hdr);
		break;
	case IPC_ROUTER_CTRL_CMD_RESUME_TX:
		rc = process_resume_tx_msg(msg, pkt);
		break;
	case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
		rc = process_new_server_msg(xprt_info, msg, pkt);
		break;
	case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
		rc = process_rmv_server_msg(xprt_info, msg, pkt);
		break;
	case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
		rc = process_rmv_client_msg(xprt_info, msg, pkt);
		break;
	default:
		RR("o UNKNOWN(%08x)\n", msg->cmd);
		rc = -ENOSYS;
	}
	kfree(msg);
	return rc;
}

/*
 * do_read_data() - Workqueue handler that drains packets from an XPRT
 * @work: Embedded in msm_ipc_router_xprt_info (read_data).
 *
 * Reads packets off the XPRT until empty. Non-local RESUME_TX/DATA traffic
 * is forwarded; other control messages are processed locally; DATA for a
 * local port is delivered to that port, creating the source remote port on
 * first contact.
 */
static void do_read_data(struct work_struct *work)
{
	struct rr_header_v1 *hdr;
	struct rr_packet *pkt = NULL;
	struct msm_ipc_port *port_ptr;
	struct msm_ipc_router_remote_port *rport_ptr;
	int ret;

	struct msm_ipc_router_xprt_info *xprt_info =
		container_of(work,
			     struct msm_ipc_router_xprt_info,
			     read_data);

	while ((pkt = rr_read(xprt_info)) != NULL) {
		if (pkt->length < calc_rx_header_size(xprt_info) ||
		    pkt->length > MAX_IPC_PKT_SIZE) {
			IPC_RTR_ERR("%s: Invalid pkt length %d\n",
				    __func__, pkt->length);
			goto fail_data;
		}

		ret = extract_header(pkt);
		if (ret < 0)
			goto fail_data;
		hdr = &(pkt->hdr);
		RAW("ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
		     hdr->version, hdr->type, hdr->src_node_id,
		     hdr->src_port_id, hdr->control_flag, hdr->size,
		     hdr->dst_node_id, hdr->dst_port_id);

		/* Multi-hop: not for this node, pass it along. */
		if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
		    ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
		     (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
			forward_msg(xprt_info, pkt);
			release_pkt(pkt);
			continue;
		}

		if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
			process_control_msg(xprt_info, pkt);
			release_pkt(pkt);
			continue;
		}

#if defined(CONFIG_ARCH_MSM)
		if (msm_ipc_router_debug_mask & SMEM_LOG) {
			smem_log_event((SMEM_LOG_PROC_ID_APPS |
				SMEM_LOG_IPC_ROUTER_EVENT_BASE |
				IPC_ROUTER_LOG_EVENT_RX),
				(hdr->src_node_id << 24) |
				(hdr->src_port_id & 0xffffff),
				(hdr->dst_node_id << 24) |
				(hdr->dst_port_id & 0xffffff),
				(hdr->type << 24) | (hdr->control_flag << 16) |
				(hdr->size & 0xffff));
		}
#endif

		down_read(&local_ports_lock_lha2);
		port_ptr = msm_ipc_router_lookup_local_port(hdr->dst_port_id);
		if (!port_ptr) {
			IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
				    hdr->dst_port_id);
			up_read(&local_ports_lock_lha2);
			release_pkt(pkt);
			return;
		}

		/* First packet from an unknown sender: create its remote
		 * port so Resume_Tx bookkeeping has somewhere to live.
		 */
		down_read(&routing_table_lock_lha3);
		rport_ptr = msm_ipc_router_lookup_remote_port(hdr->src_node_id,
							hdr->src_port_id);
		if (!rport_ptr) {
			rport_ptr = msm_ipc_router_create_remote_port(
						hdr->src_node_id,
						hdr->src_port_id);
			if (!rport_ptr) {
				IPC_RTR_ERR(
				"%s: Rmt Prt %08x:%08x create failed\n",
					__func__, hdr->src_node_id,
					hdr->src_port_id);
				up_read(&routing_table_lock_lha3);
				up_read(&local_ports_lock_lha2);
				release_pkt(pkt);
				return;
			}
		}
		up_read(&routing_table_lock_lha3);
/* do_read_data() continued: deliver the packet to its local port. */
		post_pkt_to_port(port_ptr, pkt, 0);
		up_read(&local_ports_lock_lha2);
	}
	return;

fail_data:
	release_pkt(pkt);
	IPC_RTR_ERR("%s: ipc_router has died\n", __func__);
}

/*
 * msm_ipc_router_register_server() - Register a local port as a server
 * @port_ptr: Local port that will host the service.
 * @name: <service:instance> name (addrtype must be MSM_IPC_ADDR_NAME).
 *
 * Creates the server entry, broadcasts NEW_SERVER to all links and local
 * control ports, and marks the port as a multi-link server port.
 * Returns 0 on success or a negative errno.
 */
int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
				   struct msm_ipc_addr *name)
{
	struct msm_ipc_server *server;
	union rr_control_msg ctl;

	if (!port_ptr || !name)
		return -EINVAL;

	if (name->addrtype != MSM_IPC_ADDR_NAME)
		return -EINVAL;

	down_write(&server_list_lock_lha2);
	server = msm_ipc_router_lookup_server(name->addr.port_name.service,
					      name->addr.port_name.instance,
					      IPC_ROUTER_NID_LOCAL,
					      port_ptr->this_port.port_id);
	if (server) {
		up_write(&server_list_lock_lha2);
		IPC_RTR_ERR("%s: Server already present\n", __func__);
		return -EINVAL;
	}

	server = msm_ipc_router_create_server(name->addr.port_name.service,
					      name->addr.port_name.instance,
					      IPC_ROUTER_NID_LOCAL,
					      port_ptr->this_port.port_id,
					      NULL);
	if (!server) {
		up_write(&server_list_lock_lha2);
		IPC_RTR_ERR("%s: Server Creation failed\n", __func__);
		return -EINVAL;
	}

	/* Build the NEW_SERVER announcement before dropping the lock. */
	memset(&ctl, 0, sizeof(ctl));
	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
	ctl.srv.service = server->name.service;
	ctl.srv.instance = server->name.instance;
	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
	ctl.srv.port_id = port_ptr->this_port.port_id;
	up_write(&server_list_lock_lha2);
	broadcast_ctl_msg(&ctl);
	broadcast_ctl_msg_locally(&ctl);

	mutex_lock(&port_ptr->port_lock_lhb1);
	port_ptr->type = SERVER_PORT;
	port_ptr->mode_info.mode = MULTI_LINK_MODE;
	port_ptr->port_name.service = server->name.service;
	port_ptr->port_name.instance = server->name.instance;
	mutex_unlock(&port_ptr->port_lock_lhb1);
	return 0;
}

/*
 * msm_ipc_router_unregister_server() - Unregister a local server port
 * @port_ptr: Local port currently registered as a server.
 *
 * Destroys the server entry and broadcasts REMOVE_SERVER on all links and
 * to local control ports; the port reverts to being a client port.
 */
int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
{
	struct msm_ipc_server *server;
	union rr_control_msg ctl;

	if (!port_ptr)
		return -EINVAL;

	if (port_ptr->type != SERVER_PORT) {
		IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
			    __func__);
		return -EINVAL;
	}

	if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
		IPC_RTR_ERR(
		"%s: Trying to unregister a remote server locally\n",
			__func__);
		return -EINVAL;
	}

	down_write(&server_list_lock_lha2);
	server = msm_ipc_router_lookup_server(port_ptr->port_name.service,
					      port_ptr->port_name.instance,
					      port_ptr->this_port.node_id,
					      port_ptr->this_port.port_id);
	if (!server) {
		up_write(&server_list_lock_lha2);
		IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
		return -ENODEV;
	}

	memset(&ctl, 0, sizeof(ctl));
	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
	ctl.srv.service = server->name.service;
	ctl.srv.instance = server->name.instance;
	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
	ctl.srv.port_id = port_ptr->this_port.port_id;
	msm_ipc_router_destroy_server(server, port_ptr->this_port.node_id,
				      port_ptr->this_port.port_id);
	up_write(&server_list_lock_lha2);
	broadcast_ctl_msg(&ctl);
	broadcast_ctl_msg_locally(&ctl);
	mutex_lock(&port_ptr->port_lock_lhb1);
	port_ptr->type = CLIENT_PORT;
	mutex_unlock(&port_ptr->port_lock_lhb1);
	return 0;
}

/*
 * loopback_data() - Deliver data directly to a local destination port
 * @src: Sending local port (used for the source address in the header).
 * @port_id: Destination local port id.
 * @data: skb queue holding the payload; ownership passes to the packet.
 *
 * Builds a V1 data header on the packet and posts it to the destination
 * port without touching any XPRT. Returns the payload size or a negative
 * errno.
 */
static int loopback_data(struct msm_ipc_port *src,
			 uint32_t port_id,
			 struct sk_buff_head *data)
{
	struct rr_header_v1 *hdr;
	struct msm_ipc_port *port_ptr;
	struct rr_packet *pkt;
	int ret_len;
	struct sk_buff *temp_skb;
	int align_size;

	if (!data) {
		IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
		return -EINVAL;
	}

	pkt = create_pkt(data);
	if (!pkt) {
		IPC_RTR_ERR("%s: New pkt create failed\n", __func__);
		return -ENOMEM;
	}
	hdr = &(pkt->hdr);
	hdr->version = IPC_ROUTER_V1;
	hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
	hdr->src_node_id = src->this_port.node_id;
	hdr->src_port_id = src->this_port.port_id;
	hdr->size = pkt->length;
	hdr->control_flag = 0;
	hdr->dst_node_id = IPC_ROUTER_NID_LOCAL;
	hdr->dst_port_id = port_id;

	/* Pad the tail skb so the packet length is alignment-safe. */
	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
	align_size = ALIGN_SIZE(pkt->length);
	skb_put(temp_skb, align_size);
	pkt->length += align_size;

	down_read(&local_ports_lock_lha2);
	port_ptr = msm_ipc_router_lookup_local_port(port_id);
	if (!port_ptr) {
		IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
			    port_id);
		up_read(&local_ports_lock_lha2);
		/* Detach the caller's skbs so release_pkt() does not free
		 * data the caller still owns.
		 */
		pkt->pkt_fragment_q =
NULL;
		release_pkt(pkt);
		return -ENODEV;
	}

	ret_len = hdr->size;
	post_pkt_to_port(port_ptr, pkt, 0);
	update_comm_mode_info(&src->mode_info, NULL);
	up_read(&local_ports_lock_lha2);
	return ret_len;
}

/*
 * msm_ipc_router_write_pkt() - Transmit a data packet to a remote port
 * @src: Sending local port.
 * @rport_ptr: Destination remote port.
 * @pkt: Packet to transmit; header fields are filled in here.
 *
 * Fills in the V1 data header, enforces the per-remote-port tx quota
 * (queueing the sender for a Resume_Tx wakeup and returning -EAGAIN when
 * the quota is hit), then routes the packet out on the XPRT that reaches
 * the destination node. Returns the payload size on success or a negative
 * errno.
 */
static int msm_ipc_router_write_pkt(struct msm_ipc_port *src,
				struct msm_ipc_router_remote_port *rport_ptr,
				struct rr_packet *pkt)
{
	struct rr_header_v1 *hdr;
	struct msm_ipc_router_xprt_info *xprt_info;
	struct msm_ipc_routing_table_entry *rt_entry;
	struct msm_ipc_resume_tx_port *resume_tx_port;
	struct sk_buff *temp_skb;
	int xprt_option;
	int ret;
	int align_size;

	if (!rport_ptr || !src || !pkt)
		return -EINVAL;

	hdr = &(pkt->hdr);
	hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
	hdr->src_node_id = src->this_port.node_id;
	hdr->src_port_id = src->this_port.port_id;
	hdr->size = pkt->length;
	hdr->control_flag = 0;
	hdr->dst_node_id = rport_ptr->node_id;
	hdr->dst_port_id = rport_ptr->port_id;

	/* Flow control: once the quota is consumed, register this sender
	 * to be woken by the peer's RESUME_TX and bail with -EAGAIN.
	 */
	mutex_lock(&rport_ptr->quota_lock_lhb2);
	if (rport_ptr->tx_quota_cnt == IPC_ROUTER_DEFAULT_RX_QUOTA) {
		if (msm_ipc_router_lookup_resume_tx_port(
			rport_ptr, src->this_port.port_id)) {
			mutex_unlock(&rport_ptr->quota_lock_lhb2);
			return -EAGAIN;
		}
		resume_tx_port =
			kzalloc(sizeof(struct msm_ipc_resume_tx_port),
				GFP_KERNEL);
		if (!resume_tx_port) {
			IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
				    __func__);
			mutex_unlock(&rport_ptr->quota_lock_lhb2);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&resume_tx_port->list);
		resume_tx_port->port_id = src->this_port.port_id;
		resume_tx_port->node_id = src->this_port.node_id;
		list_add_tail(&resume_tx_port->list,
			      &rport_ptr->resume_tx_port_list);
		mutex_unlock(&rport_ptr->quota_lock_lhb2);
		return -EAGAIN;
	}
	rport_ptr->tx_quota_cnt++;
	/* Last packet before the quota trips: ask the peer to confirm rx. */
	if (rport_ptr->tx_quota_cnt == IPC_ROUTER_DEFAULT_RX_QUOTA)
		hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
	mutex_unlock(&rport_ptr->quota_lock_lhb2);

	rt_entry = lookup_routing_table(hdr->dst_node_id);
	if (!rt_entry || !rt_entry->xprt_info) {
		IPC_RTR_ERR("%s: Remote node %d not up\n",
			    __func__, hdr->dst_node_id);
		return -ENODEV;
	}
	down_read(&rt_entry->lock_lha4);
	xprt_info = rt_entry->xprt_info;
	ret = prepend_header(pkt, xprt_info);
	if (ret < 0) {
		up_read(&rt_entry->lock_lha4);
		IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
		return ret;
	}
	xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
	if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
		ret = defragment_pkt(pkt);
		if (ret < 0) {
			up_read(&rt_entry->lock_lha4);
			return ret;
		}
	}

	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
	align_size = ALIGN_SIZE(pkt->length);
	skb_put(temp_skb, align_size);
	pkt->length += align_size;
	mutex_lock(&xprt_info->tx_lock_lhb2);
	ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
	mutex_unlock(&xprt_info->tx_lock_lhb2);
	up_read(&rt_entry->lock_lha4);

	if (ret < 0) {
		IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
		return ret;
	}
	update_comm_mode_info(&src->mode_info, xprt_info);

	RAW_HDR(
	"[w rr_h] ver=%i,type=%s,src_nid=%08x,src_port_id=%08x,control_flag=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
		hdr->version, type_to_str(hdr->type), hdr->src_node_id,
		hdr->src_port_id, hdr->control_flag, hdr->size,
		hdr->dst_node_id, hdr->dst_port_id);

#if defined(CONFIG_ARCH_MSM)
	if (msm_ipc_router_debug_mask & SMEM_LOG) {
		smem_log_event((SMEM_LOG_PROC_ID_APPS |
			SMEM_LOG_IPC_ROUTER_EVENT_BASE |
			IPC_ROUTER_LOG_EVENT_TX),
			(hdr->src_node_id << 24) |
			(hdr->src_port_id & 0xffffff),
			(hdr->dst_node_id << 24) |
			(hdr->dst_port_id & 0xffffff),
			(hdr->type << 24) | (hdr->control_flag << 16) |
			(hdr->size & 0xffff));
	}
#endif

	return hdr->size;
}

/*
 * msm_ipc_router_send_to() - Send an skb queue to a local or remote port
 * @src: Sending local port.
 * @data: skb queue holding the payload.
 * @dest: Destination as either <node:port> id or <service:instance> name.
 *
 * Resolves @dest and either loops the data back to a local port or writes
 * it out through the routing table, after checking the sender's permission
 * callback against the destination's cached security rule.
 */
int msm_ipc_router_send_to(struct msm_ipc_port *src,
			   struct sk_buff_head *data,
			   struct msm_ipc_addr *dest)
{
	uint32_t dst_node_id = 0, dst_port_id = 0;
	struct msm_ipc_server *server;
	struct msm_ipc_server_port *server_port;
	struct msm_ipc_router_remote_port *rport_ptr = NULL;
	struct rr_packet *pkt;
	int ret;

	if (!src || !data || !dest) {
		IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
		return -EINVAL;
	}

	/* Resolve Address*/
	if (dest->addrtype == MSM_IPC_ADDR_ID) {
		dst_node_id = dest->addr.port_addr.node_id;
		dst_port_id = dest->addr.port_addr.port_id;
	} else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
		/* Name lookup: pick the first registered <node:port> that
		 * hosts the named service.
		 */
		down_read(&server_list_lock_lha2);
		server = msm_ipc_router_lookup_server(
					dest->addr.port_name.service,
					dest->addr.port_name.instance, 0, 0);
		if (!server) {
			up_read(&server_list_lock_lha2);
			IPC_RTR_ERR("%s: Destination not reachable\n",
				    __func__);
			return -ENODEV;
		}
		server_port = list_first_entry(&server->server_port_list,
					       struct msm_ipc_server_port,
					       list);
		dst_node_id = server_port->server_addr.node_id;
		dst_port_id = server_port->server_addr.port_id;
		up_read(&server_list_lock_lha2);
	}

	if (dst_node_id == IPC_ROUTER_NID_LOCAL) {
		ret = loopback_data(src, dst_port_id, data);
		return ret;
	}

	down_read(&routing_table_lock_lha3);
	rport_ptr = msm_ipc_router_lookup_remote_port(dst_node_id,
						      dst_port_id);
	if (!rport_ptr) {
		up_read(&routing_table_lock_lha3);
		IPC_RTR_ERR("%s: Remote port not found\n", __func__);
		return -ENODEV;
	}

	if (src->check_send_permissions) {
		ret = src->check_send_permissions(rport_ptr->sec_rule);
		if (ret <= 0) {
			up_read(&routing_table_lock_lha3);
			IPC_RTR_ERR("%s: permission failure for %s\n",
				    __func__, current->comm);
			return -EPERM;
		}
	}

	pkt = create_pkt(data);
	if (!pkt) {
		up_read(&routing_table_lock_lha3);
		IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
		return -ENOMEM;
	}

	ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt);
	up_read(&routing_table_lock_lha3);
	/* On failure the skbs still belong to the caller; detach them so
	 * release_pkt() only frees the packet wrapper.
	 */
	if (ret < 0)
		pkt->pkt_fragment_q = NULL;
	release_pkt(pkt);

	return ret;
}

/*
 * msm_ipc_router_send_msg() - Send a flat buffer to a destination
 * @src: Sending local port.
 * @dest: Destination address.
 * @data: Payload buffer.
 * @data_len: Payload length in bytes.
 *
 * Converts @data into an skb queue and hands it to
 * msm_ipc_router_send_to(); frees the skbs again on failure.
 */
int msm_ipc_router_send_msg(struct msm_ipc_port *src,
			    struct msm_ipc_addr *dest,
			    void *data, unsigned int data_len)
{
	struct sk_buff_head *out_skb_head;
	int ret;

	out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
	if (!out_skb_head) {
		IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
		return -EFAULT;
	}

	ret = msm_ipc_router_send_to(src, out_skb_head, dest);
	if (ret < 0) {
		/* -EAGAIN is expected flow control, not worth logging. */
		if (ret != -EAGAIN)
			IPC_RTR_ERR(
			"%s: msm_ipc_router_send_to failed - ret: %d\n",
				__func__, ret);
/* msm_ipc_router_send_msg() continued: clean up the skbs on failure. */
		msm_ipc_router_free_skb(out_skb_head);
		return ret;
	}
	return 0;
}

/**
 * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
 * @data: Pointer to received data packet that has confirm_rx bit set
 *
 * @return: On success, number of bytes transferred is returned, else
 *	    standard linux error code is returned.
 *
 * This function sends the Resume_Tx event to the remote node that
 * sent the data with confirm_rx field set. In case of a multi-hop
 * scenario also, this function makes sure that the destination node_id
 * to which the resume_tx event should reach is right.
 */
static int msm_ipc_router_send_resume_tx(void *data)
{
	union rr_control_msg msg;
	struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
	struct msm_ipc_routing_table_entry *rt_entry;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
	msg.cli.node_id = hdr->dst_node_id;
	msg.cli.port_id = hdr->dst_port_id;
	down_read(&routing_table_lock_lha3);
	rt_entry = lookup_routing_table(hdr->src_node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: %d Node is not present",
			    __func__, hdr->src_node_id);
		up_read(&routing_table_lock_lha3);
		return -ENODEV;
	}
	RR("x RESUME_TX id=%d:%08x\n",
	   msg.cli.node_id, msg.cli.port_id);
	ret = msm_ipc_router_send_control_msg(rt_entry->xprt_info, &msg,
					      hdr->src_node_id);
	up_read(&routing_table_lock_lha3);
	if (ret < 0)
		IPC_RTR_ERR(
		"%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
			__func__, hdr->dst_node_id, hdr->dst_port_id,
			hdr->src_node_id);

	return ret;
}

/*
 * msm_ipc_router_read() - Dequeue one packet from a port's rx queue
 * @port_ptr: Local port to read from.
 * @read_pkt: Out parameter receiving the dequeued packet.
 * @buf_len: If non-zero, maximum payload the caller can accept.
 *
 * Returns the packet length on success, -EAGAIN if the queue is empty, or
 * -ETOOSMALL if the head packet exceeds @buf_len. Sends Resume_Tx back to
 * the sender when the dequeued packet carries CONFIRM_RX, and releases the
 * wakeup source once the queue drains.
 */
int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
			struct rr_packet **read_pkt,
			size_t buf_len)
{
	struct rr_packet *pkt;

	if (!port_ptr || !read_pkt)
		return -EINVAL;

	mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	if (list_empty(&port_ptr->port_rx_q)) {
		mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);
		return -EAGAIN;
	}

	pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
	if ((buf_len) && (pkt->hdr.size > buf_len)) {
		mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);
		return -ETOOSMALL;
	}
	list_del(&pkt->list);
	if (list_empty(&port_ptr->port_rx_q))
		__pm_relax(&port_ptr->port_rx_ws);
	*read_pkt = pkt;
	mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);
	if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
		msm_ipc_router_send_resume_tx(&pkt->hdr);

	return pkt->length;
}

/**
 * msm_ipc_router_rx_data_wait() - Wait for new message destined to a local port.
 * @port_ptr: Pointer to the local port
 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
 *	     > 0 timeout indicates the wait time.
 *	     0 indicates that we do not wait.
 * @return: 0 if there are pending messages to read,
 *	    standard Linux error code otherwise.
 *
 * Checks for the availability of messages that are destined to a local port.
 * If no messages are present then waits as per @timeout.
 */
int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
{
	int ret = 0;

	mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	while (list_empty(&port_ptr->port_rx_q)) {
		mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);
		if (timeout < 0) {
			ret = wait_event_interruptible(
					port_ptr->port_rx_wait_q,
					!list_empty(&port_ptr->port_rx_q));
			if (ret)
				return ret;
		} else if (timeout > 0) {
			timeout = wait_event_interruptible_timeout(
					port_ptr->port_rx_wait_q,
					!list_empty(&port_ptr->port_rx_q),
					timeout);
			if (timeout < 0)
				return -EFAULT;
		}
		/* Covers both the no-wait case and a timed-out wait. */
		if (timeout == 0)
			return -ENOMSG;
		mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	}
	mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);

	return ret;
}

/**
 * msm_ipc_router_recv_from() - Receive messages destined to a local port.
 * @port_ptr: Pointer to the local port
 * @pkt : Pointer to the router-to-router packet
 * @src: Pointer to local port address
 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
 *	     > 0 timeout indicates the wait time.
 *	     0 indicates that we do not wait.
 * @return: = Number of bytes read(On successful read operation).
 *	    = -ENOMSG (If there are no pending messages and timeout is 0).
*	    = -EINVAL (If either of the arguments, port_ptr or data is invalid)
 *	    = -EFAULT (If there are no pending messages when timeout is > 0
 *	      and the wait_event_interruptible_timeout has returned value > 0)
 *	    = -ERESTARTSYS (If there are no pending messages when timeout
 *	      is < 0 and wait_event_interruptible was interrupted by a signal)
 *
 * This function reads the messages that are destined for a local port. It
 * is used by modules that exist with-in the kernel and use IPC Router for
 * transport. The function checks if there are any messages that are already
 * received. If yes, it reads them, else it waits as per the timeout value.
 * On a successful read, the return value of the function indicates the number
 * of bytes that are read.
 */
int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
			     struct rr_packet **pkt,
			     struct msm_ipc_addr *src,
			     long timeout)
{
	int ret, data_len, align_size;
	struct sk_buff *temp_skb;
	struct rr_header_v1 *hdr = NULL;

	if (!port_ptr || !pkt) {
		IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
		return -EINVAL;
	}

	*pkt = NULL;

	ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
	if (ret)
		return ret;

	ret = msm_ipc_router_read(port_ptr, pkt, 0);
	if (ret <= 0 || !(*pkt))
		return ret;

	hdr = &((*pkt)->hdr);
	if (src) {
		src->addrtype = MSM_IPC_ADDR_ID;
		src->addr.port_addr.node_id = hdr->src_node_id;
		src->addr.port_addr.port_id = hdr->src_port_id;
	}

	/* Trim the alignment padding that was added at transmit time so
	 * the caller sees exactly hdr->size bytes of payload.
	 */
	data_len = hdr->size;
	align_size = ALIGN_SIZE(data_len);
	if (align_size) {
		temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
		skb_trim(temp_skb, (temp_skb->len - align_size));
	}
	return data_len;
}

/*
 * msm_ipc_router_read_msg() - Receive one message as a flat buffer
 * @port_ptr: Local port to read from.
 * @src: Filled with the sender's <node:port> address.
 * @data: Out parameter receiving a newly allocated payload buffer.
 * @len: Out parameter receiving the payload length.
 *
 * Non-blocking receive (timeout 0): reads a pending packet and linearizes
 * its payload into *data. Returns 0 on success or a negative errno.
 */
int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
			    struct msm_ipc_addr *src,
			    unsigned char **data,
			    unsigned int *len)
{
	struct rr_packet *pkt;
	int ret;

	ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
	if (ret < 0) {
		if (ret != -ENOMSG)
			IPC_RTR_ERR(
			"%s: msm_ipc_router_recv_from failed - ret: %d\n",
				__func__, ret);
		return ret;
	}

	*data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
	/* NOTE(review): on conversion failure *data is NULL but the
	 * function still returns 0 -- callers must check *data.
	 */
	if (!(*data))
		IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);

	*len = ret;
	release_pkt(pkt);
	return 0;
}

/**
 * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
 * @notify: Callback function to notify any event on the port; it receives
 *	    the event ID, any out-of-band data with its length, and the
 *	    private data registered during port creation.
 * @priv: Private info to be passed while the notification is generated.
 *
 * @return: Pointer to the port on success, NULL on error.
 */
struct msm_ipc_port *msm_ipc_router_create_port(
	void (*notify)(unsigned event, void *oob_data,
		       size_t oob_data_len, void *priv),
	void *priv)
{
	struct msm_ipc_port *port_ptr;
	int ret;

	/* Block until the local router has come up before handing out
	 * ports; interruptible so callers can be signalled.
	 */
	ret = wait_for_completion_interruptible(&msm_ipc_local_router_up);
	if (ret < 0) {
		IPC_RTR_ERR("%s: Error waiting for local router\n", __func__);
		return NULL;
	}

	port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
	if (!port_ptr)
		IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);

	return port_ptr;
}

/*
 * msm_ipc_router_close_port() - Tear down a local port
 * @port_ptr: Port to close; freed before returning.
 *
 * Broadcasts REMOVE_SERVER/REMOVE_CLIENT as appropriate, drains the rx
 * queue, removes the server database entry for server ports and frees the
 * port structure.
 */
int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
{
	union rr_control_msg msg;
	struct rr_packet *pkt, *temp_pkt;
	struct msm_ipc_server *server;

	if (!port_ptr)
		return -EINVAL;

	if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
		down_write(&local_ports_lock_lha2);
		list_del(&port_ptr->list);
		up_write(&local_ports_lock_lha2);

		if (port_ptr->type == SERVER_PORT) {
			memset(&msg, 0, sizeof(msg));
			msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
			msg.srv.service = port_ptr->port_name.service;
			msg.srv.instance = port_ptr->port_name.instance;
			msg.srv.node_id = port_ptr->this_port.node_id;
			msg.srv.port_id = port_ptr->this_port.port_id;
			RR("x REMOVE_SERVER Name=%d:%08x Id=%d:%08x\n",
			   msg.srv.service, msg.srv.instance,
			   msg.srv.node_id, msg.srv.port_id);
			broadcast_ctl_msg(&msg);
			broadcast_ctl_msg_locally(&msg);
		}

		/* Server port could have been a client port earlier.
* Send REMOVE_CLIENT message in either case.
		 */
		RR("x REMOVE_CLIENT id=%d:%08x\n",
		   port_ptr->this_port.node_id, port_ptr->this_port.port_id);
		msm_ipc_router_send_remove_client(&port_ptr->mode_info,
					port_ptr->this_port.node_id,
					port_ptr->this_port.port_id);
	} else if (port_ptr->type == CONTROL_PORT) {
		down_write(&control_ports_lock_lha5);
		list_del(&port_ptr->list);
		up_write(&control_ports_lock_lha5);
	} else if (port_ptr->type == IRSC_PORT) {
		down_write(&local_ports_lock_lha2);
		list_del(&port_ptr->list);
		up_write(&local_ports_lock_lha2);
		signal_irsc_completion();
	}

	/* Drain and free any packets still queued on the port. */
	mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
		list_del(&pkt->list);
		release_pkt(pkt);
	}
	mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);

	if (port_ptr->type == SERVER_PORT) {
		down_write(&server_list_lock_lha2);
		server = msm_ipc_router_lookup_server(
				port_ptr->port_name.service,
				port_ptr->port_name.instance,
				port_ptr->this_port.node_id,
				port_ptr->this_port.port_id);
		if (server)
			msm_ipc_router_destroy_server(server,
				port_ptr->this_port.node_id,
				port_ptr->this_port.port_id);
		up_write(&server_list_lock_lha2);
	}

	wakeup_source_trash(&port_ptr->port_rx_ws);
	kfree(port_ptr);
	return 0;
}

/*
 * msm_ipc_router_get_curr_pkt_size() - Peek the length of the head packet
 * @port_ptr: Local port whose rx queue is inspected.
 *
 * Returns 0 when the rx queue is empty, else the total (padded) length of
 * the packet at the head of the queue, or -EINVAL for a NULL port.
 */
int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
{
	struct rr_packet *pkt;
	int rc = 0;

	if (!port_ptr)
		return -EINVAL;

	mutex_lock(&port_ptr->port_rx_q_lock_lhb3);
	if (!list_empty(&port_ptr->port_rx_q)) {
		pkt = list_first_entry(&port_ptr->port_rx_q,
				       struct rr_packet, list);
		rc = pkt->length;
	}
	mutex_unlock(&port_ptr->port_rx_q_lock_lhb3);

	return rc;
}

/*
 * msm_ipc_router_bind_control_port() - Convert a local port to a control port
 * @port_ptr: Port to move onto the control-ports list.
 *
 * Control ports receive broadcast control messages (server/client
 * add/remove notifications).
 */
int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
{
	if (!port_ptr)
		return -EINVAL;

	down_write(&local_ports_lock_lha2);
	list_del(&port_ptr->list);
	up_write(&local_ports_lock_lha2);
	port_ptr->type = CONTROL_PORT;
	down_write(&control_ports_lock_lha5);
	list_add_tail(&port_ptr->list, &control_ports);
	up_write(&control_ports_lock_lha5);

	return 0;
}

/*
 * msm_ipc_router_lookup_server_name() - Look up servers by name
 * @srv_name: <service:instance> to search for.
 * @srv_info: Caller-supplied array to fill; may be NULL when
 *	      @num_entries_in_array is 0.
 * @num_entries_in_array: Capacity of @srv_info.
 * @lookup_mask: Instance bits to compare; 0 means compare all bits.
 *
 * Returns the total number of matching <node:port> server entries, which
 * may exceed @num_entries_in_array; only the first entries are copied out.
 */
int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
				      struct msm_ipc_server_info *srv_info,
				      int num_entries_in_array,
				      uint32_t lookup_mask)
{
	struct msm_ipc_server *server;
	struct msm_ipc_server_port *server_port;
	int key, i = 0; /*num_entries_found*/

	if (!srv_name) {
		IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
		return -EINVAL;
	}

	if (num_entries_in_array && !srv_info) {
		IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
		return -EINVAL;
	}

	down_read(&server_list_lock_lha2);
	if (!lookup_mask)
		lookup_mask = 0xFFFFFFFF;
	key = (srv_name->service & (SRV_HASH_SIZE - 1));
	list_for_each_entry(server, &server_list[key], list) {
		if ((server->name.service != srv_name->service) ||
		    ((server->name.instance & lookup_mask) !=
			srv_name->instance))
			continue;

		list_for_each_entry(server_port,
			&server->server_port_list, list) {
			if (i < num_entries_in_array) {
				srv_info[i].node_id =
					server_port->server_addr.node_id;
				srv_info[i].port_id =
					server_port->server_addr.port_id;
				srv_info[i].service = server->name.service;
				srv_info[i].instance = server->name.instance;
			}
			i++;
		}
	}
	up_read(&server_list_lock_lha2);

	return i;
}

/*
 * msm_ipc_router_close() - Close and free every registered XPRT
 */
int msm_ipc_router_close(void)
{
	struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;

	down_write(&xprt_info_list_lock_lha5);
	list_for_each_entry_safe(xprt_info, tmp_xprt_info,
				 &xprt_info_list, list) {
		xprt_info->xprt->close(xprt_info->xprt);
		list_del(&xprt_info->list);
		kfree(xprt_info);
	}
	up_write(&xprt_info_list_lock_lha5);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
/* Format the routing table into @buf (at most @max bytes) for debugfs. */
static int dump_routing_table(char *buf, int max)
{
	int i = 0, j;
	struct msm_ipc_routing_table_entry *rt_entry;

	for (j = 0; j < RT_HASH_SIZE; j++) {
		down_read(&routing_table_lock_lha3);
		list_for_each_entry(rt_entry, &routing_table[j], list) {
			down_read(&rt_entry->lock_lha4);
			i += scnprintf(buf + i, max - i,
				       "Node Id: 0x%08x\n", rt_entry->node_id);
			if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL) {
				i += scnprintf(buf + i, max - i,
				       "XPRT Name: Loopback\n");
				i += scnprintf(buf + i, max - i,
				       "Next Hop: %d\n", rt_entry->node_id);
			} else {
				i += scnprintf(buf + i, max - i,
					"XPRT Name: %s\n",
					rt_entry->xprt_info->xprt->name);
				i += scnprintf(buf + i, max - i,
					"Next Hop: 0x%08x\n",
					rt_entry->xprt_info->remote_node_id);
			}
			i += scnprintf(buf + i, max - i, "\n");
			up_read(&rt_entry->lock_lha4);
		}
		up_read(&routing_table_lock_lha3);
	}

	return i;
}

/* Format the XPRT list into @buf (at most @max bytes) for debugfs. */
static int dump_xprt_info(char *buf, int max)
{
	int i = 0;
	struct msm_ipc_router_xprt_info *xprt_info;

	down_read(&xprt_info_list_lock_lha5);
	list_for_each_entry(xprt_info, &xprt_info_list, list) {
		i += scnprintf(buf + i, max - i, "XPRT Name: %s\n",
			       xprt_info->xprt->name);
		i += scnprintf(buf + i, max - i, "Link Id: %d\n",
			       xprt_info->xprt->link_id);
		i += scnprintf(buf + i, max - i, "Initialized: %s\n",
			       (xprt_info->initialized ? "Y" : "N"));
		i += scnprintf(buf + i, max - i, "Remote Node Id: 0x%08x\n",
			       xprt_info->remote_node_id);
		i += scnprintf(buf + i, max - i, "\n");
	}
	up_read(&xprt_info_list_lock_lha5);

	return i;
}

/* Format the server database into @buf (at most @max bytes) for debugfs. */
static int dump_servers(char *buf, int max)
{
	int i = 0, j;
	struct msm_ipc_server *server;
	struct msm_ipc_server_port *server_port;

	down_read(&server_list_lock_lha2);
	for (j = 0; j < SRV_HASH_SIZE; j++) {
		list_for_each_entry(server, &server_list[j], list) {
			list_for_each_entry(server_port,
					    &server->server_port_list,
					    list) {
				i += scnprintf(buf + i, max - i,
					"Service: 0x%08x\n",
					server->name.service);
				i += scnprintf(buf + i, max - i,
					"Instance: 0x%08x\n",
					server->name.instance);
				i += scnprintf(buf + i, max - i,
					"Node_id: 0x%08x\n",
					server_port->server_addr.node_id);
				i += scnprintf(buf + i, max - i,
					"Port_id: 0x%08x\n",
					server_port->server_addr.port_id);
				i += scnprintf(buf + i, max - i, "\n");
			}
		}
	}
	up_read(&server_list_lock_lha2);

	return i;
}

/* Format the remote-port table into @buf for debugfs (body continues in
 * the next chunk of this file).
 */
static int dump_remote_ports(char *buf, int max)
{
	int i = 0, j, k;
	struct msm_ipc_router_remote_port *rport_ptr;
	struct msm_ipc_routing_table_entry *rt_entry;

	for (j = 0; j < RT_HASH_SIZE; j++) {
		down_read(&routing_table_lock_lha3);
		list_for_each_entry(rt_entry, &routing_table[j], list) {
			down_read(&rt_entry->lock_lha4);
			for
(k = 0; k < RP_HASH_SIZE; k++) { list_for_each_entry(rport_ptr, &rt_entry->remote_port_list[k], list) { i += scnprintf(buf + i, max - i, "Node_id: 0x%08x\n", rport_ptr->node_id); i += scnprintf(buf + i, max - i, "Port_id: 0x%08x\n", rport_ptr->port_id); i += scnprintf(buf + i, max - i, "Quota_cnt: %d\n", rport_ptr->tx_quota_cnt); i += scnprintf(buf + i, max - i, "\n"); } } up_read(&rt_entry->lock_lha4); } up_read(&routing_table_lock_lha3); } return i; } static int dump_control_ports(char *buf, int max) { int i = 0; struct msm_ipc_port *port_ptr; down_read(&control_ports_lock_lha5); list_for_each_entry(port_ptr, &control_ports, list) { i += scnprintf(buf + i, max - i, "Node_id: 0x%08x\n", port_ptr->this_port.node_id); i += scnprintf(buf + i, max - i, "Port_id: 0x%08x\n", port_ptr->this_port.port_id); i += scnprintf(buf + i, max - i, "\n"); } up_read(&control_ports_lock_lha5); return i; } static int dump_local_ports(char *buf, int max) { int i = 0, j; struct msm_ipc_port *port_ptr; down_read(&local_ports_lock_lha2); for (j = 0; j < LP_HASH_SIZE; j++) { list_for_each_entry(port_ptr, &local_ports[j], list) { mutex_lock(&port_ptr->port_lock_lhb1); i += scnprintf(buf + i, max - i, "Node_id: 0x%08x\n", port_ptr->this_port.node_id); i += scnprintf(buf + i, max - i, "Port_id: 0x%08x\n", port_ptr->this_port.port_id); i += scnprintf(buf + i, max - i, "# pkts tx'd %d\n", port_ptr->num_tx); i += scnprintf(buf + i, max - i, "# pkts rx'd %d\n", port_ptr->num_rx); i += scnprintf(buf + i, max - i, "# bytes tx'd %ld\n", port_ptr->num_tx_bytes); i += scnprintf(buf + i, max - i, "# bytes rx'd %ld\n", port_ptr->num_rx_bytes); mutex_unlock(&port_ptr->port_lock_lhb1); i += scnprintf(buf + i, max - i, "\n"); } } up_read(&local_ports_lock_lha2); return i; } #define DEBUG_BUFMAX 4096 static char debug_buffer[DEBUG_BUFMAX]; static ssize_t debug_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int (*fill)(char *buf, int max) = file->private_data; int bsize = 
fill(debug_buffer, DEBUG_BUFMAX); return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize); } static int debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations debug_ops = { .read = debug_read, .open = debug_open, }; static void debug_create(const char *name, mode_t mode, struct dentry *dent, int (*fill)(char *buf, int max)) { debugfs_create_file(name, mode, dent, fill, &debug_ops); } static void debugfs_init(void) { struct dentry *dent; dent = debugfs_create_dir("msm_ipc_router", 0); if (IS_ERR(dent)) return; debug_create("dump_local_ports", 0444, dent, dump_local_ports); debug_create("dump_remote_ports", 0444, dent, dump_remote_ports); debug_create("dump_control_ports", 0444, dent, dump_control_ports); debug_create("dump_servers", 0444, dent, dump_servers); debug_create("dump_xprt_info", 0444, dent, dump_xprt_info); debug_create("dump_routing_table", 0444, dent, dump_routing_table); } #else static void debugfs_init(void) {} #endif static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt) { struct msm_ipc_router_xprt_info *xprt_info; struct msm_ipc_routing_table_entry *rt_entry; xprt_info = kmalloc(sizeof(struct msm_ipc_router_xprt_info), GFP_KERNEL); if (!xprt_info) return -ENOMEM; xprt_info->xprt = xprt; xprt_info->initialized = 0; xprt_info->remote_node_id = -1; INIT_LIST_HEAD(&xprt_info->pkt_list); mutex_init(&xprt_info->rx_lock_lhb2); mutex_init(&xprt_info->tx_lock_lhb2); wakeup_source_init(&xprt_info->ws, xprt->name); xprt_info->need_len = 0; xprt_info->abort_data_read = 0; INIT_WORK(&xprt_info->read_data, do_read_data); INIT_LIST_HEAD(&xprt_info->list); xprt_info->workqueue = create_singlethread_workqueue(xprt->name); if (!xprt_info->workqueue) { kfree(xprt_info); return -ENOMEM; } if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) { xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL; xprt_info->initialized = 1; } 
down_write(&xprt_info_list_lock_lha5); list_add_tail(&xprt_info->list, &xprt_info_list); up_write(&xprt_info_list_lock_lha5); down_write(&routing_table_lock_lha3); if (!routing_table_inited) { init_routing_table(); rt_entry = alloc_routing_table_entry(IPC_ROUTER_NID_LOCAL); add_routing_table_entry(rt_entry); routing_table_inited = 1; } up_write(&routing_table_lock_lha3); xprt->priv = xprt_info; return 0; } static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt) { struct msm_ipc_router_xprt_info *xprt_info; if (xprt && xprt->priv) { xprt_info = xprt->priv; mutex_lock(&xprt_info->rx_lock_lhb2); xprt_info->abort_data_read = 1; mutex_unlock(&xprt_info->rx_lock_lhb2); down_write(&xprt_info_list_lock_lha5); list_del(&xprt_info->list); up_write(&xprt_info_list_lock_lha5); flush_workqueue(xprt_info->workqueue); destroy_workqueue(xprt_info->workqueue); wakeup_source_trash(&xprt_info->ws); xprt->priv = 0; kfree(xprt_info); } } struct msm_ipc_router_xprt_work { struct msm_ipc_router_xprt *xprt; struct work_struct work; }; static void xprt_open_worker(struct work_struct *work) { struct msm_ipc_router_xprt_work *xprt_work = container_of(work, struct msm_ipc_router_xprt_work, work); msm_ipc_router_add_xprt(xprt_work->xprt); kfree(xprt_work); } static void xprt_close_worker(struct work_struct *work) { struct msm_ipc_router_xprt_work *xprt_work = container_of(work, struct msm_ipc_router_xprt_work, work); msm_ipc_cleanup_routing_table(xprt_work->xprt->priv); msm_ipc_router_remove_xprt(xprt_work->xprt); xprt_work->xprt->sft_close_done(xprt_work->xprt); kfree(xprt_work); } void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, unsigned event, void *data) { struct msm_ipc_router_xprt_info *xprt_info = xprt->priv; struct msm_ipc_router_xprt_work *xprt_work; struct rr_packet *pkt; unsigned long ret; if (!msm_ipc_router_workqueue) { ret = wait_for_completion_timeout(&msm_ipc_local_router_up, IPC_ROUTER_INIT_TIMEOUT); if (!ret || !msm_ipc_router_workqueue) { 
IPC_RTR_ERR("%s: IPC Router not initialized\n", __func__); return; } } switch (event) { case IPC_ROUTER_XPRT_EVENT_OPEN: D("open event for '%s'\n", xprt->name); xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work), GFP_ATOMIC); if (xprt_work) { xprt_work->xprt = xprt; INIT_WORK(&xprt_work->work, xprt_open_worker); queue_work(msm_ipc_router_workqueue, &xprt_work->work); } else { IPC_RTR_ERR( "%s: malloc failure - Couldn't notify OPEN event", __func__); } break; case IPC_ROUTER_XPRT_EVENT_CLOSE: D("close event for '%s'\n", xprt->name); xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work), GFP_ATOMIC); if (xprt_work) { xprt_work->xprt = xprt; INIT_WORK(&xprt_work->work, xprt_close_worker); queue_work(msm_ipc_router_workqueue, &xprt_work->work); } else { IPC_RTR_ERR( "%s: malloc failure - Couldn't notify CLOSE event", __func__); } break; } if (!data) return; while (!xprt_info) { msleep(100); xprt_info = xprt->priv; } pkt = clone_pkt((struct rr_packet *)data); if (!pkt) return; mutex_lock(&xprt_info->rx_lock_lhb2); list_add_tail(&pkt->list, &xprt_info->pkt_list); __pm_stay_awake(&xprt_info->ws); mutex_unlock(&xprt_info->rx_lock_lhb2); queue_work(xprt_info->workqueue, &xprt_info->read_data); } static int __init msm_ipc_router_init(void) { int i, ret; struct msm_ipc_routing_table_entry *rt_entry; #if defined(CONFIG_ARCH_MSM) msm_ipc_router_debug_mask |= SMEM_LOG; #endif ipc_rtr_log_ctxt = ipc_log_context_create(IPC_RTR_LOG_PAGES, "ipc_router"); if (!ipc_rtr_log_ctxt) IPC_RTR_ERR("%s: Unable to create IPC logging for IPC RTR", __func__); debugfs_init(); for (i = 0; i < SRV_HASH_SIZE; i++) INIT_LIST_HEAD(&server_list[i]); for (i = 0; i < LP_HASH_SIZE; i++) INIT_LIST_HEAD(&local_ports[i]); down_write(&routing_table_lock_lha3); if (!routing_table_inited) { init_routing_table(); rt_entry = alloc_routing_table_entry(IPC_ROUTER_NID_LOCAL); add_routing_table_entry(rt_entry); routing_table_inited = 1; } up_write(&routing_table_lock_lha3); ret = 
msm_ipc_router_init_sockets(); if (ret < 0) IPC_RTR_ERR("%s: Init sockets failed\n", __func__); ret = msm_ipc_router_security_init(); if (ret < 0) IPC_RTR_ERR("%s: Security Init failed\n", __func__); msm_ipc_router_workqueue = create_singlethread_workqueue("msm_ipc_router"); if (!msm_ipc_router_workqueue) return -ENOMEM; complete_all(&msm_ipc_local_router_up); return ret; } module_init(msm_ipc_router_init); MODULE_DESCRIPTION("MSM IPC Router"); MODULE_LICENSE("GPL v2");
ShinySide/SM-A700F
net/ipc_router/ipc_router_core.c
C
gpl-2.0
97,987
.block_settings .block_tree ul { margin-left: 18px; } .block_settings .block_tree p.hasicon { text-indent: -21px; padding-left: 21px; } .block_settings .block_tree p.hasicon img { width: 16px; height: 16px; margin-top: 3px; margin-right: 5px; vertical-align: top; } .block_settings .block_tree p.hasicon.visibleifjs { display: block; } .block_settings .block_tree .tree_item.branch { padding-left: 21px; } .block_settings .block_tree .tree_item { cursor: pointer; margin: 3px 0; background-position: 0 50%; background-repeat: no-repeat; } .block_settings .block_tree .active_tree_node { font-weight: bold; } .block_settings .block_tree [aria-expanded="true"] { background-image: url('[[pix:t/expanded]]'); } .block_settings .block_tree [aria-expanded="false"] { background-image: url('[[pix:t/collapsed]]'); } .block_settings .block_tree [aria-expanded="true"].emptybranch { background-image: url('[[pix:t/collapsed_empty]]'); } .block_settings .block_tree [aria-expanded="false"].loading { background-image: url('[[pix:i/loading_small]]'); } /*rtl:raw: .block_settings .block_tree [aria-expanded="false"] {background-image: url('[[pix:t/collapsed_rtl]]');} .block_settings .block_tree [aria-expanded="true"].emptybranch {background-image: url('[[pix:t/collapsed_empty_rtl]]');} .block_settings .block_tree [aria-expanded="false"].loading {background-image: url('[[pix:i/loading_small]]');} */ .block_settings .block_tree [aria-hidden="false"] { display: block; } .block_settings .block_tree [aria-hidden="true"]:not(.icon) { display: none; }
merrill-oakland/moodle
blocks/settings/styles.css
CSS
gpl-3.0
1,641
describe("", function() { var rootEl; beforeEach(function() { rootEl = browser.rootEl; browser.get("build/docs/examples/example-sanitize-service/index-jquery.html"); }); it('should sanitize the html snippet by default', function() { expect(element(by.css('#bind-html-with-sanitize div')).getAttribute('innerHTML')). toBe('<p>an html\n<em>click here</em>\nsnippet</p>'); }); it('should inline raw snippet if bound to a trusted value', function() { expect(element(by.css('#bind-html-with-trust div')).getAttribute('innerHTML')). toBe("<p style=\"color:blue\">an html\n" + "<em onmouseover=\"this.textContent='PWN3D!'\">click here</em>\n" + "snippet</p>"); }); it('should escape snippet without any filter', function() { expect(element(by.css('#bind-default div')).getAttribute('innerHTML')). toBe("&lt;p style=\"color:blue\"&gt;an html\n" + "&lt;em onmouseover=\"this.textContent='PWN3D!'\"&gt;click here&lt;/em&gt;\n" + "snippet&lt;/p&gt;"); }); it('should update', function() { element(by.model('snippet')).clear(); element(by.model('snippet')).sendKeys('new <b onclick="alert(1)">text</b>'); expect(element(by.css('#bind-html-with-sanitize div')).getAttribute('innerHTML')). toBe('new <b>text</b>'); expect(element(by.css('#bind-html-with-trust div')).getAttribute('innerHTML')).toBe( 'new <b onclick="alert(1)">text</b>'); expect(element(by.css('#bind-default div')).getAttribute('innerHTML')).toBe( "new &lt;b onclick=\"alert(1)\"&gt;text&lt;/b&gt;"); }); });
LearnNavi/Naranawm
www/assets/library/angular-1.6.5/docs/ptore2e/example-sanitize-service/jquery_test.js
JavaScript
agpl-3.0
1,558
<?php final class PhabricatorProjectCustomFieldNumericIndex extends PhabricatorCustomFieldNumericIndexStorage { public function getApplicationName() { return 'project'; } }
leolujuyi/phabricator
src/applications/project/storage/PhabricatorProjectCustomFieldNumericIndex.php
PHP
apache-2.0
186
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for dict methods. d.keys() -> list(d.keys()) d.items() -> list(d.items()) d.values() -> list(d.values()) d.iterkeys() -> iter(d.keys()) d.iteritems() -> iter(d.items()) d.itervalues() -> iter(d.values()) d.viewkeys() -> d.keys() d.viewitems() -> d.items() d.viewvalues() -> d.values() Except in certain very specific contexts: the iter() can be dropped when the context is list(), sorted(), iter() or for...in; the list() can be dropped when the context is list() or sorted() (but not iter() or for...in!). Special contexts that apply to both: list(), sorted(), tuple() set(), any(), all(), sum(). Note: iter(d.keys()) could be written as iter(d) but since the original d.iterkeys() was also redundant we don't fix this. And there are (rare) contexts where it makes a difference (e.g. when passing it as an argument to a function that introspects the argument). """ # Local imports from .. import pytree from .. import patcomp from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot from .. import fixer_util iter_exempt = fixer_util.consuming_calls | set(["iter"]) class FixDict(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< head=any+ trailer< '.' 
method=('keys'|'items'|'values'| 'iterkeys'|'iteritems'|'itervalues'| 'viewkeys'|'viewitems'|'viewvalues') > parens=trailer< '(' ')' > tail=any* > """ def transform(self, node, results): head = results["head"] method = results["method"][0] # Extract node for method name tail = results["tail"] syms = self.syms method_name = method.value isiter = method_name.startswith(u"iter") isview = method_name.startswith(u"view") if isiter or isview: method_name = method_name[4:] assert method_name in (u"keys", u"items", u"values"), repr(method) head = [n.clone() for n in head] tail = [n.clone() for n in tail] special = not tail and self.in_special_context(node, isiter) args = head + [pytree.Node(syms.trailer, [Dot(), Name(method_name, prefix=method.prefix)]), results["parens"].clone()] new = pytree.Node(syms.power, args) if not (special or isview): new.prefix = u"" new = Call(Name(u"iter" if isiter else u"list"), [new]) if tail: new = pytree.Node(syms.power, [new] + tail) new.prefix = node.prefix return new P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" p1 = patcomp.compile_pattern(P1) P2 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > """ p2 = patcomp.compile_pattern(P2) def in_special_context(self, node, isiter): if node.parent is None: return False results = {} if (node.parent.parent is not None and self.p1.match(node.parent.parent, results) and results["node"] is node): if isiter: # iter(d.iterkeys()) -> iter(d.keys()), etc. return results["func"].value in iter_exempt else: # list(d.keys()) -> list(d.keys()), etc. return results["func"].value in fixer_util.consuming_calls if not isiter: return False # for ... in d.iterkeys() -> for ... in d.keys(), etc. return self.p2.match(node.parent, results) and results["node"] is node
nmercier/linux-cross-gcc
win32/bin/Lib/lib2to3/fixes/fix_dict.py
Python
bsd-3-clause
3,931
using System; using System.Collections.Generic; using System.Linq; using Orchard.Mvc; namespace Orchard.Localization.Services { public class CurrentCultureWorkContext : IWorkContextStateProvider { private readonly IEnumerable<ICultureSelector> _cultureSelectors; private readonly IHttpContextAccessor _httpContextAccessor; public CurrentCultureWorkContext(IEnumerable<ICultureSelector> cultureSelectors, IHttpContextAccessor httpContextAccessor) { _cultureSelectors = cultureSelectors; _httpContextAccessor = httpContextAccessor; } public Func<WorkContext, T> Get<T>(string name) { if (name == "CurrentCulture") { var cultureName = GetCurrentCulture(); return ctx => (T)(object)cultureName; } return null; } private string GetCurrentCulture() { var httpContext = _httpContextAccessor.Current(); var culture = _cultureSelectors .Select(c => c.GetCulture(httpContext)) .Where(c => c != null) .OrderByDescending(c => c.Priority) .FirstOrDefault(c => !String.IsNullOrEmpty(c.CultureName)); return culture == null ? String.Empty : culture.CultureName; } } }
Codinlab/Orchard
src/Orchard/Localization/Services/CurrentCultureWorkContext.cs
C#
bsd-3-clause
1,338
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { BaseException } from '../facade/exceptions'; import { Type } from '../facade/lang'; export declare class InvalidPipeArgumentException extends BaseException { constructor(type: Type, value: Object); }
vidyutron/vidyutron.github.io
vendor/libs/@angular/common/src/pipes/invalid_pipe_argument_exception.d.ts
TypeScript
mit
416
""" Handled exceptions raised by REST framework. In addition Django's built in 403 and 404 exceptions are handled. (`django.http.Http404` and `django.core.exceptions.PermissionDenied`) """ from __future__ import unicode_literals import math from django.utils import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext from rest_framework import status from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList def _force_text_recursive(data): """ Descend into a nested data structure, forcing any lazy translation strings into plain text. """ if isinstance(data, list): ret = [ _force_text_recursive(item) for item in data ] if isinstance(data, ReturnList): return ReturnList(ret, serializer=data.serializer) return data elif isinstance(data, dict): ret = dict([ (key, _force_text_recursive(value)) for key, value in data.items() ]) if isinstance(data, ReturnDict): return ReturnDict(ret, serializer=data.serializer) return data return force_text(data) class APIException(Exception): """ Base class for REST framework exceptions. Subclasses should provide `.status_code` and `.default_detail` properties. """ status_code = status.HTTP_500_INTERNAL_SERVER_ERROR default_detail = _('A server error occurred.') def __init__(self, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) def __str__(self): return self.detail # The recommended style for using `ValidationError` is to keep it namespaced # under `serializers`, in order to minimize potential confusion with Django's # built in `ValidationError`. For example: # # from rest_framework import serializers # raise serializers.ValidationError('Value was invalid') class ValidationError(APIException): status_code = status.HTTP_400_BAD_REQUEST def __init__(self, detail): # For validation errors the 'detail' key is always required. 
# The details should always be coerced to a list if not already. if not isinstance(detail, dict) and not isinstance(detail, list): detail = [detail] self.detail = _force_text_recursive(detail) def __str__(self): return six.text_type(self.detail) class ParseError(APIException): status_code = status.HTTP_400_BAD_REQUEST default_detail = _('Malformed request.') class AuthenticationFailed(APIException): status_code = status.HTTP_401_UNAUTHORIZED default_detail = _('Incorrect authentication credentials.') class NotAuthenticated(APIException): status_code = status.HTTP_401_UNAUTHORIZED default_detail = _('Authentication credentials were not provided.') class PermissionDenied(APIException): status_code = status.HTTP_403_FORBIDDEN default_detail = _('You do not have permission to perform this action.') class NotFound(APIException): status_code = status.HTTP_404_NOT_FOUND default_detail = _('Not found.') class MethodNotAllowed(APIException): status_code = status.HTTP_405_METHOD_NOT_ALLOWED default_detail = _('Method "{method}" not allowed.') def __init__(self, method, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail).format(method=method) class NotAcceptable(APIException): status_code = status.HTTP_406_NOT_ACCEPTABLE default_detail = _('Could not satisfy the request Accept header.') def __init__(self, detail=None, available_renderers=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) self.available_renderers = available_renderers class UnsupportedMediaType(APIException): status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE default_detail = _('Unsupported media type "{media_type}" in request.') def __init__(self, media_type, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail).format( media_type=media_type ) class Throttled(APIException): status_code = 
status.HTTP_429_TOO_MANY_REQUESTS default_detail = _('Request was throttled.') extra_detail_singular = 'Expected available in {wait} second.' extra_detail_plural = 'Expected available in {wait} seconds.' def __init__(self, wait=None, detail=None): if detail is not None: self.detail = force_text(detail) else: self.detail = force_text(self.default_detail) if wait is None: self.wait = None else: self.wait = math.ceil(wait) self.detail += ' ' + force_text(ungettext( self.extra_detail_singular.format(wait=self.wait), self.extra_detail_plural.format(wait=self.wait), self.wait ))
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/rest_framework/exceptions.py
Python
agpl-3.0
5,266
module ActiveMerchant #:nodoc: module Billing #:nodoc: class OrbitalSoftDescriptors include Validateable PHONE_FORMAT_1 = /\A\d{3}-\d{3}-\d{4}\z/ PHONE_FORMAT_2 = /\A\d{3}-\w{7}\z/ # ==== Tampa PNS Soft Descriptors # The support for Soft Descriptors via the PNS Host is only for customers processing through Chase # Paymentech Canada. # Unlike Salem, the only value that gets passed on the cardholder statement is the Merchant Name field. # And for these customers, it is a maximum of 25 bytes of data. # # All other Soft Descriptor fields can optionally be sent, but will not be submitted to the settlement host # and will not display on the cardholder statement. attr_accessor :merchant_name, :product_description, :merchant_city, :merchant_phone, :merchant_url, :merchant_email def initialize(options = {}) self.merchant_name = options[:merchant_name] self.merchant_city = options[:merchant_city] self.merchant_phone = options[:merchant_phone] self.merchant_url = options[:merchant_url] self.merchant_email = options[:merchant_email] end def validate errors.add(:merchant_name, "is required") if self.merchant_name.blank? errors.add(:merchant_name, "is required to be 25 bytes or less") if self.merchant_name.bytesize > 25 unless self.merchant_phone.blank? || self.merchant_phone.match(PHONE_FORMAT_1) || self.merchant_phone.match(PHONE_FORMAT_2) errors.add(:merchant_phone, "is required to follow \"NNN-NNN-NNNN\" or \"NNN-AAAAAAA\" format") end [:merchant_email, :merchant_url].each do |attr| unless self.send(attr).blank? errors.add(attr, "is required to be 13 bytes or less") if self.send(attr).bytesize > 13 end end end end end end
iressgrad15/growstuff
vendor/gems/activemerchant-1.33.0/lib/active_merchant/billing/gateways/orbital/orbital_soft_descriptors.rb
Ruby
agpl-3.0
1,946
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. namespace System.Net.WebSockets { public enum WebSocketError { Success = 0, InvalidMessageType = 1, Faulted = 2, NativeError = 3, NotAWebSocket = 4, UnsupportedVersion = 5, UnsupportedProtocol = 6, HeaderError = 7, ConnectionClosedPrematurely = 8, InvalidState = 9 } }
Yanjing123/corefx
src/System.Net.WebSockets/src/System/Net/WebSockets/WebSocketError.cs
C#
mit
513
<!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <title>QUnit Main Test Suite</title> <link rel="stylesheet" href="../qunit/qunit.css"> <script src="../qunit/qunit.js"></script> <script src="test.js"></script> <script src="deepEqual.js"></script> <script src="swarminject.js"></script> </head> <body> <div id="qunit"></div> <div id="qunit-fixture">test markup</div> </body> </html>
hank7444/ember1.81_pratice
vendor/qunit/test/index.html
HTML
mit
395
CasperJS Sphinx Documentation ============================= Sphinx documentation for [CasperJS](http://casperjs.org/) 1.1-DEV and future versions. Work in progress.
awholegunch/loom
ui/node_modules/casperjs/docs/README.md
Markdown
apache-2.0
166
SELECT 'Upgrading MetaStore schema from 0.10.0 to 0.11.0' AS Status from dual; SELECT 'Finished upgrading MetaStore schema from 0.10.0 to 0.11.0' AS Status from dual;
scalingdata/Impala
thirdparty/hive-1.2.1.2.3.0.0-2557/src/metastore/scripts/upgrade/oracle/upgrade-0.10.0-to-0.11.0.oracle.sql
SQL
apache-2.0
167
cask 'quicklock' do version '1.5' sha256 '8d3847602419040f860964bad937abddbd1b7b3ccd7f0dcd3bc0d083f16d3295' # amazonaws.com/f.cl.ly was verified as official when first introduced to the cask url 'https://s3.amazonaws.com/f.cl.ly/items/1e3G0g2b1g0z201m3j1f/QuickLock.app.zip' appcast 'http://quicklockapp.com/appcast.xml', checkpoint: '582556c374e09cc083dc8b3085787ce8323bb38ba6396ccf0327d08f35fc7a94' name 'QuickLock' homepage 'http://www.quicklockapp.com/' license :unknown # TODO: change license and remove this comment; ':unknown' is a machine-generated placeholder app 'QuickLock.app' end
farmerchris/homebrew-cask
Casks/quicklock.rb
Ruby
bsd-2-clause
623
<!DOCTYPE html> <html> <head> <title>Instantiation of custom element: custom element type is given via the local name of the custom element</title> <meta name="author" title="Sergey G. Grekhov" href="mailto:[email protected]"> <meta name="author" title="Aleksei Yu. Semenov" href="mailto:[email protected]"> <meta name="assert" content="The custom element type is given to a custom element at the time of its instantation in one of the two ways: 1. As the local name of the custom element."> <link rel="help" href="http://www.w3.org/TR/custom-elements/#instantiating-custom-elements"> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <script src="../testcommon.js"></script> </head> <body> <div id="log"></div> <script> test(function() { var doc = newHTMLDocument(); var GeneratedConstructor = doc.registerElement('x-a'); doc.body.innerHTML = '<x-a id="x-a"></x-a>'; var customElement = doc.querySelector('#x-a'); assert_equals(Object.getPrototypeOf(customElement), GeneratedConstructor.prototype, 'Custom element type should be the type, specified by the local name of ' + 'the custom element'); }, 'Test custom element type, which is given via the local name of the custom element. ' + 'Custom element created via innerHTML property'); testInIFrame('../resources/x-element.html', function(doc) { var GeneratedConstructor = doc.registerElement('x-element'); var xelement = doc.querySelector('#x-element'); assert_equals(Object.getPrototypeOf(xelement), GeneratedConstructor.prototype, 'Custom element type should be the type, specified by the local name of ' + 'the custom element'); }, 'Test custom element type, which is given via the local name of the custom element. ' + 'Custom element is defined in loaded HTML document'); </script> </body> </html>
danakj/chromium
third_party/WebKit/LayoutTests/imported/wpt/custom-elements/v0/instantiating/custom-element-type-local-name.html
HTML
bsd-3-clause
1,890
# Vertex Cache Optimised Index Buffer Compression This is a small proof of concept for compressing and decompressing index buffer triangle lists. It's designed to maintain the order of the triangle list and perform best with a triangle list that has been vertex cache post-transform optimised (a pre-transform cache optimisation is done as part of the compression). It's also designed to be relatively lightweight, with a decompression throughput in the tens of millions of triangles per core. It does not achieve state of the art levels of compression levels (which can be less than a bit per triangle, as well as providing good chances for vertex prediction), but it does maintain ordering of triangles and support arbitrary topologies. There are some cases where the vertices within a triangle are re-ordered, but the general winding direction is maintained. ## How does it work? The inspiration was a mix of Fabian Giesen's [Simple loss-less index buffer compression](http://fgiesen.wordpress.com/2013/12/14/simple-lossless-index-buffer-compression/) and the higher compression algorithms that make use of shared edges and re-order triangles. The idea was that there is probably a middle ground between them. The basic goals were: * Maintain the ordering of triangles, exploiting vertex cache optimal ordering. * Exploit recent triangle connectivity. * Make it fast, especially for decompression, without the need to maintain large extra data structures, like winged edge. * Make it simple enough to be easily understandable. The vertex cache optimisation means that there will be quite a few vertices and edges shared between the next triangle in the list and the previous. We exploit this by maintaining two relatively small fixed size FIFOs, an edge FIFO and a vertex FIFO (not unlike the vertex cache itself, except we store recent indices). The compression relies on 4 codes: 1. A _new vertex_ code, for vertices that have not yet been seen. 2. 
A _cached edge_ code, for edges that have been seen recently. This code is followed by a relative index back into the edge FIFO. 3. A _cached vertex_ code, for vertices that have been seen recently. This code is followed by a relative index back into the vertex FIFO. 4. A _free vertex_ code, for vertices that have been seen, but not recently. This code is followed by a variable length integer encoding of the index relative to the most recent new vertex. Triangles can either consist of two codes, a cached edge followed by one of the vertex codes, or of 3 of the vertex codes. The most common codes in an optimised mesh are generally the cached edge and new vertex codes. Cached edges are always the first code in any triangle they appear in and may correspond to any edge in the original triangle (we check all the edges against the FIFO). This means that an individual triangle may have its vertices specified in a different order (but in the same winding direction) than the original uncompressed one. New vertex codes work because vertices are re-ordered to the order in which they appear in the mesh, meaning whenever we encounter a new vertex, we can just read and an internal counter to get the current index, incrementing it afterwards. This has the benefit of also meaning vertices are in pre-transform cache optimised order. ## Does it actually work? That's a better question! While my thoughts were that in theory it would average around 11-12bits a triangle, the Stanford Armadillo mesh (optimised with Tom Forsyth's vertex cache optimisation algorithm), with 345944 triangles, compresses the index buffer down to 563122 bytes, which is more like 13 and the Stanford Bunny is 12.85bits or so. This is not anywhere near the state of the art in terms of compression (which get down to less than a bit), but that isn't the goal. Performance wise, with the code posted here, the Armadillo compresses in 18.5 milliseconds and decompresses in 6.6 milliseconds on average on my system. 
The Stanford bunny is more like 1.4 milliseconds to decompress, relatively. ## Update! I've added a second more efficient (in terms of both speed and size) compression algorithm (CompressIndexBuffer2 and DecompressIndexBuffer2), as well as some changes upstream from Branimir Karadžić, who made some compiler compatibility fixes and added 16bit indice support. This uses a code per triangle instead of multiple codes for different cases. For details of the original algorithm, please see this [blog post](http://conorstokes.github.io/graphics/2014/09/28/vertex-cache-optimised-index-buffer-compression/). For details of the second algorithm, please see this [blog post](http://conorstokes.github.io/graphics/2014/09/28/vertex-cache-optimised-index-buffer-compression/).
ashemedai/ProDBG
src/native/external/bgfx/3rdparty/ib-compress/README.md
Markdown
mit
4,756
namespace Nancy.Demo.Hosting.Aspnet.Models
{
    using System;

    /// <summary>
    /// Demo payload carrying an int, a bool and a string, with value-based
    /// equality (both <see cref="IEquatable{T}"/> and operator overloads).
    /// Instances are effectively immutable: all setters are private and only
    /// assigned in the constructor.
    /// </summary>
    [Serializable]
    public class Payload : IEquatable<Payload>
    {
        /// <summary>Gets the integer component.</summary>
        public int IntValue { get; private set; }

        /// <summary>Gets the boolean component.</summary>
        public bool BoolValue { get; private set; }

        /// <summary>Gets the string component (may be null).</summary>
        public string StringValue { get; private set; }

        /// <summary>
        /// Initializes a new instance of the <see cref="Payload"/> class.
        /// </summary>
        /// <param name="intValue">The integer component.</param>
        /// <param name="boolValue">The boolean component.</param>
        /// <param name="stringValue">The string component; null is allowed.</param>
        public Payload(int intValue, bool boolValue, string stringValue)
        {
            this.IntValue = intValue;
            this.BoolValue = boolValue;
            this.StringValue = stringValue;
        }

        /// <summary>
        /// Determines value equality against another <see cref="Payload"/>:
        /// all three components must match.
        /// </summary>
        public bool Equals(Payload other)
        {
            if (ReferenceEquals(null, other))
            {
                return false;
            }

            if (ReferenceEquals(this, other))
            {
                return true;
            }

            return other.IntValue == this.IntValue &&
                   other.BoolValue.Equals(this.BoolValue) &&
                   Equals(other.StringValue, this.StringValue);
        }

        /// <summary>
        /// Determines value equality against an arbitrary object; only exact
        /// <see cref="Payload"/> instances can compare equal.
        /// </summary>
        public override bool Equals(object obj)
        {
            if (ReferenceEquals(null, obj))
            {
                return false;
            }

            if (ReferenceEquals(this, obj))
            {
                return true;
            }

            return obj.GetType() == typeof(Payload) && this.Equals((Payload)obj);
        }

        /// <summary>
        /// Combines the three components into a hash code consistent with
        /// <see cref="Equals(Payload)"/>.
        /// </summary>
        public override int GetHashCode()
        {
            unchecked
            {
                // 397 is an odd prime conventionally used by this hash pattern.
                var result = this.IntValue;
                result = (result * 397) ^ this.BoolValue.GetHashCode();
                result = (result * 397) ^ (this.StringValue != null ? this.StringValue.GetHashCode() : 0);
                return result;
            }
        }

        /// <summary>Value equality; null-safe via static Equals.</summary>
        public static bool operator ==(Payload left, Payload right)
        {
            return Equals(left, right);
        }

        /// <summary>Value inequality; null-safe via static Equals.</summary>
        public static bool operator !=(Payload left, Payload right)
        {
            return !Equals(left, right);
        }

        /// <summary>Returns "StringValue,IntValue,BoolValue".</summary>
        public override string ToString()
        {
            return string.Format("{0},{1},{2}", this.StringValue, this.IntValue, this.BoolValue);
        }
    }
}
murador/Nancy
src/Nancy.Demo.Hosting.Aspnet/Models/Payload.cs
C#
mit
2,282
// Software License Agreement (BSD License)
//
// Copyright (c) 2010-2015, Deusty, LLC
// All rights reserved.
//
// Redistribution and use of this software in source and binary forms,
// with or without modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
//
// * Neither the name of Deusty nor the names of its contributors may be used
//   to endorse or promote products derived from this software without specific
//   prior written permission of Deusty, LLC.

#import <Foundation/Foundation.h>
// QuartzCore provides CGFloat for projects that do not link AppKit/UIKit.
#import <QuartzCore/QuartzCore.h>

/**
 * This class represents an NSColor replacement for CLI projects that don't link with AppKit
 **/
@interface CLIColor : NSObject

/**
 * Returns a color object with the given red/green/blue/alpha components.
 * Mirrors NSColor's +colorWithCalibratedRed:green:blue:alpha: factory so call
 * sites can stay source-compatible.
 * NOTE(review): components are presumably expected in the 0.0–1.0 range, as
 * with NSColor — confirm in the implementation; this header does not clamp.
 **/
+ (CLIColor *)colorWithCalibratedRed:(CGFloat)red green:(CGFloat)green blue:(CGFloat)blue alpha:(CGFloat)alpha;

/**
 * Copies the receiver's red/green/blue/alpha components into the supplied
 * out-parameters. NOTE(review): whether NULL out-parameters are tolerated is
 * not established by this declaration — verify against the implementation
 * before passing NULL.
 **/
- (void)getRed:(CGFloat *)red green:(CGFloat *)green blue:(CGFloat *)blue alpha:(CGFloat *)alpha;

@end
LambertPark/CNMRC-2.0.0
Vendor/CocoaLumberjack/CLI/CLIColor.h
C
mit
1,046
ace.define("ace/mode/gherkin_highlight_rules", ["require", "exports", "module", "ace/lib/oop", "ace/mode/text_highlight_rules"], function(require, exports, module) {

var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;

// Escape sequences recognised inside Gherkin doc strings:
// \xHH, \OOO (octal), single-char escapes, \UXXXXXXXX and \uXXXX.
var stringEscape = "\\\\(x[0-9A-Fa-f]{2}|[0-7]{3}|[\\\\abfnrtv'\"]|U[0-9A-Fa-f]{8}|u[0-9A-Fa-f]{4})";

// Highlight rules for the Gherkin (Cucumber feature file) language.
var GherkinHighlightRules = function() {
    // Keyword tables per language; only English ("en") is bundled here, but
    // the map/join below keeps the structure ready for more languages.
    var languages = [{
        name: "en",
        labels: "Feature|Background|Scenario(?: Outline)?|Examples",
        keywords: "Given|When|Then|And|But"
    }];

    var labels = languages.map(function(l) {
        return l.labels;
    }).join("|");
    var keywords = languages.map(function(l) {
        return l.keywords;
    }).join("|");

    this.$rules = {
        start: [{
            token: "constant.numeric",
            regex: "(?:(?:[1-9]\\d*)|(?:0))"
        }, {
            token: "comment",
            regex: "#.*$"
        }, {
            // Section labels ("Feature:") and step keywords ("Given", ...).
            token: "keyword",
            regex: "(?:" + labels + "):|(?:" + keywords + ")\\b"
        }, {
            // "*" may be used in place of a step keyword.
            token: "keyword",
            regex: "\\*"
        }, {
            token: "string", // multi line """ string start
            regex: '"{3}',
            next: "qqstring3"
        }, {
            token: "string", // " string
            regex: '"',
            next: "qqstring"
        }, {
            // @tag lines: consume whitespace and tags, then fall back to start.
            token: "text",
            regex: "^\\s*(?=@[\\w])",
            next: [{
                token: "text",
                regex: "\\s+"
            }, {
                token: "variable.parameter",
                regex: "@[\\w]+"
            }, {
                token: "empty",
                regex: "",
                next: "start"
            }]
        }, {
            // <placeholder> references inside Scenario Outlines.
            token: "comment",
            regex: "<[^>]+>"
        }, {
            // Table cell separator with content after it.
            token: "comment",
            regex: "\\|(?=.)",
            next: "table-item"
        }, {
            // Trailing "|" at end of a table row.
            token: "comment",
            regex: "\\|$",
            next: "start"
        }],
        "qqstring3": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            token: "string", // multi line """ string end
            regex: '"{3}',
            next: "start"
        }, {
            defaultToken: "string"
        }],
        "qqstring": [{
            token: "constant.language.escape",
            regex: stringEscape
        }, {
            // Backslash at end of line continues the string.
            token: "string",
            regex: "\\\\$",
            next: "qqstring"
        }, {
            token: "string",
            regex: '"|$',
            next: "start"
        }, {
            defaultToken: "string"
        }],
        "table-item": [{
            token: "comment",
            regex: /$/,
            next: "start"
        }, {
            token: "comment",
            regex: /\|/
        }, {
            token: "string",
            regex: /\\./
        }, {
            defaultToken: "string"
        }]
    };
    this.normalizeRules();
};

oop.inherits(GherkinHighlightRules, TextHighlightRules);

exports.GherkinHighlightRules = GherkinHighlightRules;
});

ace.define("ace/mode/gherkin", ["require", "exports", "module", "ace/lib/oop", "ace/mode/text", "ace/mode/gherkin_highlight_rules"], function(require, exports, module) {

var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var GherkinHighlightRules = require("./gherkin_highlight_rules").GherkinHighlightRules;

// Editor mode for Gherkin feature files.
var Mode = function() {
    this.HighlightRules = GherkinHighlightRules;
};
oop.inherits(Mode, TextMode);

(function() {
    this.lineCommentStart = "#";
    this.$id = "ace/mode/gherkin";

    // Computes the indent for the line following `line`, widening after
    // section headers and continuing "|" table rows and "*" step lists.
    // (Fix: removed a stray debug console.log(state) left in the original.)
    this.getNextLineIndent = function(state, line, tab) {
        var indent = this.$getIndent(line);
        var space2 = " ";
        var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
        var tokens = tokenizedLine.tokens;

        if (line.match("[ ]*\\|")) {
            indent += "| ";
        }

        // Do not change indentation after a comment-only tail.
        if (tokens.length && tokens[tokens.length - 1].type == "comment") {
            return indent;
        }

        if (state == "start") {
            if (line.match("Scenario:|Feature:|Scenario\ Outline:|Background:")) {
                indent += space2;
            } else if (line.match("(Given|Then).+(:)$|Examples:")) {
                indent += space2;
            } else if (line.match("\\*.+")) {
                indent += "* ";
            }
        }

        return indent;
    };
}).call(Mode.prototype);

exports.Mode = Mode;
});
protosam/hostcontrol
www/resources/ace-builds-master/src-noconflict/mode-gherkin.js
JavaScript
mit
4,654
<html><body><h1>Access Denied!</h1></body></html>
sxyunfeng/fcms
vos/index.html
HTML
apache-2.0
47
/* Copyright (c) 2017 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package vsan import ( "context" "flag" "fmt" "os" "github.com/vmware/govmomi/govc/cli" "github.com/vmware/govmomi/govc/flags" ) type rm struct { *flags.DatastoreFlag force bool verbose bool } func init() { cli.Register("datastore.vsan.dom.rm", &rm{}) } func (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) { cmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx) cmd.DatastoreFlag.Register(ctx, f) f.BoolVar(&cmd.force, "f", false, "Force delete") f.BoolVar(&cmd.verbose, "v", false, "Print deleted UUIDs to stdout, failed to stderr") } func (cmd *rm) Process(ctx context.Context) error { if err := cmd.DatastoreFlag.Process(ctx); err != nil { return err } return nil } func (cmd *rm) Usage() string { return "UUID..." } func (cmd *rm) Description() string { return `Remove vSAN DOM objects in DS. 
Examples: govc datastore.vsan.dom.rm d85aa758-63f5-500a-3150-0200308e589c govc datastore.vsan.dom.rm -f d85aa758-63f5-500a-3150-0200308e589c govc datastore.vsan.dom.ls -o | xargs govc datastore.vsan.dom.rm` } func (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error { if f.NArg() == 0 { return flag.ErrHelp } ds, err := cmd.Datastore() if err != nil { return err } hosts, err := ds.AttachedHosts(ctx) if err != nil { return err } if len(hosts) == 0 { return flag.ErrHelp } m, err := hosts[0].ConfigManager().VsanInternalSystem(ctx) if err != nil { return err } res, err := m.DeleteVsanObjects(ctx, f.Args(), &cmd.force) if err != nil { return err } if cmd.verbose { for _, r := range res { if r.Success { fmt.Fprintln(cmd.Out, r.Uuid) } else { fmt.Fprintf(os.Stderr, "%s %s\n", r.Uuid, r.FailureReason[0].Message) } } } return nil }
childsb/origin
vendor/github.com/vmware/govmomi/govc/datastore/vsan/rm.go
GO
apache-2.0
2,329
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package bundler supports bundling (batching) of items. Bundling amortizes an
// action with fixed costs over multiple items. For example, if an API provides
// an RPC that accepts a list of items as input, but clients would prefer
// adding items one at a time, then a Bundler can accept individual items from
// the client and bundle many of them into a single RPC.
//
// This package is experimental and subject to change without notice.
package bundler

import (
	"errors"
	"math"
	"reflect"
	"sync"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/sync/semaphore"
)

// Default values used by NewBundler for thresholds/limits left unset.
const (
	DefaultDelayThreshold       = time.Second
	DefaultBundleCountThreshold = 10
	DefaultBundleByteThreshold  = 1e6 // 1M
	DefaultBufferedByteLimit    = 1e9 // 1G
)

var (
	// ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
	ErrOverflow = errors.New("bundler reached buffered byte limit")

	// ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
	ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
)

// A Bundler collects items added to it into a bundle until the bundle
// exceeds a given size, then calls a user-provided function to handle the bundle.
type Bundler struct {
	// Starting from the time that the first message is added to a bundle, once
	// this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
	DelayThreshold time.Duration

	// Once a bundle has this many items, handle the bundle. Since only one
	// item at a time is added to a bundle, no bundle will exceed this
	// threshold, so it also serves as a limit. The default is
	// DefaultBundleCountThreshold.
	BundleCountThreshold int

	// Once the number of bytes in current bundle reaches this threshold, handle
	// the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
	// but does not cap the total size of a bundle.
	BundleByteThreshold int

	// The maximum size of a bundle, in bytes. Zero means unlimited.
	BundleByteLimit int

	// The maximum number of bytes that the Bundler will keep in memory before
	// returning ErrOverflow. The default is DefaultBufferedByteLimit.
	BufferedByteLimit int

	// The maximum number of handler invocations that can be running at once.
	// The default is 1.
	HandlerLimit int

	handler       func(interface{}) // called to handle a bundle
	itemSliceZero reflect.Value     // nil (zero value) for slice of items
	flushTimer    *time.Timer       // implements DelayThreshold

	// mu guards curBundle and nextTicket below.
	mu      sync.Mutex
	sem     *semaphore.Weighted // enforces BufferedByteLimit
	semOnce sync.Once
	curBundle bundle // incoming items added to this bundle

	// Each bundle is assigned a unique ticket that determines the order in which the
	// handler is called. The ticket is assigned with mu locked, but waiting for tickets
	// to be handled is done via mu2 and cond, below.
	nextTicket uint64 // next ticket to be assigned

	// mu2 and cond guard the handler-ordering state: nextHandled and active.
	mu2         sync.Mutex
	cond        *sync.Cond
	nextHandled uint64 // next ticket to be handled

	// In this implementation, active uses space proportional to HandlerLimit, and
	// waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
	// or release occurs, so large values of HandlerLimit max may cause performance
	// issues.
	active map[uint64]bool // tickets of bundles actively being handled
}

// bundle is one batch of accumulated items awaiting the handler.
type bundle struct {
	items reflect.Value // slice of item type
	size  int           // size in bytes of all items
}

// NewBundler creates a new Bundler.
//
// itemExample is a value of the type that will be bundled. For example, if you
// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
//
// handler is a function that will be called on each bundle. If itemExample is
// of type T, the argument to handler is of type []T. handler is always called
// sequentially for each bundle, and never in parallel.
//
// Configure the Bundler by setting its thresholds and limits before calling
// any of its methods.
func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
	b := &Bundler{
		DelayThreshold:       DefaultDelayThreshold,
		BundleCountThreshold: DefaultBundleCountThreshold,
		BundleByteThreshold:  DefaultBundleByteThreshold,
		BufferedByteLimit:    DefaultBufferedByteLimit,
		HandlerLimit:         1,

		handler:       handler,
		itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
		active:        map[uint64]bool{},
	}
	b.curBundle.items = b.itemSliceZero
	b.cond = sync.NewCond(&b.mu2)
	return b
}

func (b *Bundler) initSemaphores() {
	// Create the semaphores lazily, because the user may set limits
	// after NewBundler.
	b.semOnce.Do(func() {
		b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
	})
}

// Add adds item to the current bundle. It marks the bundle for handling and
// starts a new one if any of the thresholds or limits are exceeded.
//
// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
// the item can never be handled. Add returns ErrOversizedItem in this case.
//
// If adding the item would exceed the maximum memory allowed
// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
// memory, Add returns ErrOverflow.
//
// Add never blocks.
func (b *Bundler) Add(item interface{}, size int) error {
	// If this item exceeds the maximum size of a bundle,
	// we can never send it.
	if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
		return ErrOversizedItem
	}
	// If adding this item would exceed our allotted memory
	// footprint, we can't accept it.
	// (TryAcquire also returns false if anything is waiting on the semaphore,
	// so calls to Add and AddWait shouldn't be mixed.)
	b.initSemaphores()
	if !b.sem.TryAcquire(int64(size)) {
		return ErrOverflow
	}
	b.add(item, size)
	return nil
}

// add adds item to the current bundle. It marks the bundle for handling and
// starts a new one if any of the thresholds or limits are exceeded.
func (b *Bundler) add(item interface{}, size int) {
	b.mu.Lock()
	defer b.mu.Unlock()

	// If adding this item to the current bundle would cause it to exceed the
	// maximum bundle size, close the current bundle and start a new one.
	if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {
		b.startFlushLocked()
	}
	// Add the item.
	b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))
	b.curBundle.size += size

	// Start a timer to flush the item if one isn't already running.
	// startFlushLocked clears the timer and closes the bundle at the same time,
	// so we only allocate a new timer for the first item in each bundle.
	// (We could try to call Reset on the timer instead, but that would add a lot
	// of complexity to the code just to save one small allocation.)
	if b.flushTimer == nil {
		b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)
	}

	// If the current bundle equals the count threshold, close it.
	if b.curBundle.items.Len() == b.BundleCountThreshold {
		b.startFlushLocked()
	}
	// If the current bundle equals or exceeds the byte threshold, close it.
	if b.curBundle.size >= b.BundleByteThreshold {
		b.startFlushLocked()
	}
}

// AddWait adds item to the current bundle. It marks the bundle for handling and
// starts a new one if any of the thresholds or limits are exceeded.
//
// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
// the item can never be handled. AddWait returns ErrOversizedItem in this case.
//
// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
// AddWait blocks until space is available or ctx is done.
//
// Calls to Add and AddWait should not be mixed on the same Bundler.
func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {
	// If this item exceeds the maximum size of a bundle,
	// we can never send it.
	if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
		return ErrOversizedItem
	}
	// If adding this item would exceed our allotted memory footprint, block
	// until space is available. The semaphore is FIFO, so there will be no
	// starvation.
	b.initSemaphores()
	if err := b.sem.Acquire(ctx, int64(size)); err != nil {
		return err
	}
	// Here, we've reserved space for item. Other goroutines can call AddWait
	// and even acquire space, but no one can take away our reservation
	// (assuming sem.Release is used correctly). So there is no race condition
	// resulting from locking the mutex after sem.Acquire returns.
	b.add(item, size)
	return nil
}

// Flush invokes the handler for all remaining items in the Bundler and waits
// for it to return.
func (b *Bundler) Flush() {
	b.mu.Lock()
	b.startFlushLocked()
	// Here, all bundles with tickets < b.nextTicket are
	// either finished or active. Those are the ones
	// we want to wait for.
	t := b.nextTicket
	b.mu.Unlock()
	b.initSemaphores()
	b.waitUntilAllHandled(t)
}

// startFlushLocked closes the current bundle (if non-empty) and dispatches it
// to the handler on a new goroutine, ordered by ticket. Caller must hold b.mu.
func (b *Bundler) startFlushLocked() {
	if b.flushTimer != nil {
		b.flushTimer.Stop()
		b.flushTimer = nil
	}
	if b.curBundle.items.Len() == 0 {
		return
	}
	// Here, both semaphores must have been initialized.
	bun := b.curBundle
	b.curBundle = bundle{items: b.itemSliceZero}
	ticket := b.nextTicket
	b.nextTicket++
	go func() {
		defer func() {
			b.sem.Release(int64(bun.size))
			b.release(ticket)
		}()
		b.acquire(ticket)
		b.handler(bun.items.Interface())
	}()
}

// acquire blocks until ticket is the next to be served, then returns. In order for N
// acquire calls to return, the tickets must be in the range [0, N). A ticket must
// not be presented to acquire more than once.
func (b *Bundler) acquire(ticket uint64) {
	b.mu2.Lock()
	defer b.mu2.Unlock()
	if ticket < b.nextHandled {
		panic("bundler: acquire: arg too small")
	}
	for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) {
		b.cond.Wait()
	}
	// Here,
	// ticket == b.nextHandled: the caller is the next one to be handled;
	// and len(b.active) < b.HandlerLimit: there is space available.
	b.active[ticket] = true
	b.nextHandled++
	// Broadcast, not Signal: although at most one acquire waiter can make progress,
	// there might be waiters in waitUntilAllHandled.
	b.cond.Broadcast()
}

// If a ticket is used for a call to acquire, it must later be passed to release. A
// ticket must not be presented to release more than once.
func (b *Bundler) release(ticket uint64) {
	b.mu2.Lock()
	defer b.mu2.Unlock()
	if !b.active[ticket] {
		panic("bundler: release: not an active ticket")
	}
	delete(b.active, ticket)
	b.cond.Broadcast()
}

// waitUntilAllHandled blocks until all tickets < n have called release, meaning
// all bundles with tickets < n have been handled.
func (b *Bundler) waitUntilAllHandled(n uint64) {
	// Proof of correctness of this function.
	// "N is acquired" means acquire(N) has returned.
	// "N is released" means release(N) has returned.
	// 1. If N is acquired, N-1 is acquired.
	//    Follows from the loop test in acquire, and the fact
	//    that nextHandled is incremented by 1.
	// 2. If nextHandled >= N, then N-1 is acquired.
	//    Because we only increment nextHandled to N after N-1 is acquired.
	// 3. If nextHandled >= N, then all n < N is acquired.
	//    Follows from #1 and #2.
	// 4. If N is acquired and N is not in active, then N is released.
	//    Because we put N in active before acquire returns, and only
	//    remove it when it is released.
	// Let min(active) be the smallest member of active, or infinity if active is empty.
	// 5. If nextHandled >= N and N <= min(active), then all n < N is released.
	//    From nextHandled >= N and #3, all n < N is acquired.
	//    N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active.
	//    So from #4, all n < N is released.
	// The loop test below is the antecedent of #5.
	b.mu2.Lock()
	defer b.mu2.Unlock()
	for !(b.nextHandled >= n && n <= min(b.active)) {
		b.cond.Wait()
	}
}

// min returns the minimum value of the set s, or the largest uint64 if
// s is empty.
func min(s map[uint64]bool) uint64 {
	var m uint64 = math.MaxUint64
	for n := range s {
		if n < m {
			m = n
		}
	}
	return m
}
kolyshkin/moby
vendor/google.golang.org/api/support/bundler/bundler.go
GO
apache-2.0
12,735
/* * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#ifndef __MLX5_VXLAN_H__
#define __MLX5_VXLAN_H__

#include <linux/mlx5/driver.h>
#include "en.h"

/* One tracked VXLAN UDP destination port. */
struct mlx5e_vxlan {
	u16 udp_port;
};

/* Deferred VXLAN port add/remove request, executed from a work item. */
struct mlx5e_vxlan_work {
	struct work_struct	work;
	struct mlx5e_priv	*priv;
	sa_family_t		sa_family;
	u16			port;
};

/*
 * VXLAN offload is allowed only when the device reports stateless VXLAN
 * tunnel support and the function is the PF.
 */
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
		mlx5_core_is_pf(mdev));
}

void mlx5e_vxlan_init(struct mlx5e_priv *priv);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);

/* Queue an asynchronous add (add != 0) or remove of a VXLAN port. */
void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
			    u16 port, int add);
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);

#endif /* __MLX5_VXLAN_H__ */
geminy/aidear
oss/linux/linux-4.7/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
C
gpl-3.0
2,166
"""Implementation of JSONDecoder
"""
import re
import sys
import struct

from json import scanner
try:
    from _json import scanstring as c_scanstring
except ImportError:
    c_scanstring = None

__all__ = ['JSONDecoder']

FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

def _floatconstants():
    # Build NaN/+Inf/-Inf from their IEEE-754 big-endian byte patterns,
    # byte-swapping per 8-byte double on little-endian machines.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    if sys.byteorder != 'big':
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf

NaN, PosInf, NegInf = _floatconstants()


def linecol(doc, pos):
    # Translate a character offset into (1-based line, column).
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    return lineno, colno


def errmsg(msg, doc, pos, end=None):
    # Note that this function is called from _json
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = '{0}: line {1} column {2} (char {3})'
        return fmt.format(msg, lineno, colno, pos)
        #fmt = '%s: line %d column %d (char %d)'
        #return fmt % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
    return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
    #fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
    #return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)


_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

DEFAULT_ENCODING = "utf-8"

def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                #msg = "Invalid control character %r at" % (terminator,)
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end


# Use speedup if available
scanstring = c_scanstring or py_scanstring

WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'

def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
               object_pairs_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON object starting just after its opening '{'.
    # Returns (object, index just past the closing '}').
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                # bpo-17368 fix: must skip past the closing '}' (end + 1);
                # returning just `end` left the caller positioned on it.
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)

        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))

        end += 1

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs_append((key, value))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''

        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))

    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end

def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON array starting just after its opening '['.
    # Returns (list, index just past the closing ']').
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))

        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end

class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``object_pairs_hook``, if specified will be called with the result of
        every JSON object decoded with an ordered list of pairs.  The return
        value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes
        priority.

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        If ``strict`` is false (true is the default), then control
        characters will be allowed inside strings.  Control characters in
        this context are those with character codes in the 0-31 range,
        including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.scan_once = scanner.make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises ValueError if extra data follows the document.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
huran2014/huran.github.io
wot_gateway/usr/lib/python2.7/json/decoder.py
Python
gpl-2.0
13,785