repo_name | path | size | content | license
---|---|---|---|---|
master777/tap | wp-content/plugins/wonderplugin-carousel/engine/wonderplugincarouselskins.js | 28810 | /**
* WonderPlugin Carousel Skin Options
* Copyright 2014 Magic Hills Pty Ltd - http://www.wonderplugin.com
*/
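/*
 * A minimal consumption sketch (not part of the plugin itself; "skinName" and
 * "userOptions" are placeholder names): pick a skin's defaults by name and
 * copy any user overrides on top of them.
 *
 *   var defaults = WONDERPLUGIN_CAROUSEL_SKIN_OPTIONS[skinName] || {};
 *   var options = {};
 *   for (var key in defaults) options[key] = defaults[key];
 *   for (var key in userOptions) options[key] = userOptions[key];
 */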
var WONDERPLUGIN_CAROUSEL_SKIN_OPTIONS = {
classic : {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:100,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"classic",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-3.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-2.png",
direction:"horizontal",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:18,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:32,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:1000
},
gallery: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:48,
itembackgroundimagewidth:100,
skin:"gallery",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-48-48-2.png",
direction:"horizontal",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:4,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:48,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:1000
},
highlight: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:48,
itembackgroundimagewidth:100,
skin:"highlight",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:true,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-48-48-2.png",
direction:"horizontal",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:4,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:48,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:1000
},
list: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:12,
random:false,
showbottomshadow:false,
arrowheight:28,
itembackgroundimagewidth:100,
skin:"list",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-28-28-0.png",
direction:"vertical",
navimage:"bullet-12-12-1.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:8,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:28,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:4,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:12,
loop:0,
transitionduration:1000
},
navigator: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:12,
random:false,
showbottomshadow:false,
arrowheight:28,
itembackgroundimagewidth:100,
skin:"navigator",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-28-28-0.png",
direction:"horizontal",
navimage:"bullet-12-12-1.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:4,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:28,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:4,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:2,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:12,
loop:0,
transitionduration:1000
},
showcase: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"showcase",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"none",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-4.png",
direction:"vertical",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:8,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"vertical",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:32,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:1,
navswitchonmouseover:true,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:1000
},
simplicity: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:100,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"simplicity",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"none",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-6.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-1.png",
direction:"horizontal",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:4,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:32,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:1000
},
stylish: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:100,
navheight:24,
random:false,
showbottomshadow:true,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"stylish",
responsive:true,
bottomshadowimage:"bottomshadow-110-100-5.png",
enabletouchswipe:false,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:100,
hoveroverlayimage:"hoveroverlay-64-64-4.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-0.png",
direction:"horizontal",
navimage:"bullet-24-24-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:8,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:32,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:4,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:24,
loop:0,
transitionduration:1000
},
thumbnail: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
navheight:16,
random:false,
showbottomshadow:false,
arrowheight:28,
itembackgroundimagewidth:100,
skin:"thumbnail",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
enabletouchswipe:true,
navstyle:"none",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:95,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-28-28-0.png",
direction:"horizontal",
navimage:"bullet-16-16-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:8,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:28,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:8,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:1,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
transitionduration:750
},
vertical: {
width:240,
height:180,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:100,
navheight:24,
random:false,
showbottomshadow:false,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"vertical",
responsive:true,
bottomshadowimage:"bottomshadow-110-100-5.png",
enabletouchswipe:true,
navstyle:"none",
backgroundimagetop:-40,
arrowstyle:"always",
bottomshadowimagetop:100,
hoveroverlayimage:"hoveroverlay-64-64-4.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
showitembottomshadow:false,
transitioneasing:"easeOutExpo",
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-4.png",
direction:"vertical",
navimage:"bullet-24-24-0.png",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:48,
showplayvideo:true,
spacing:12,
scrollitems:1,
showhoveroverlay:true,
scrollmode:"page",
navdirection:"vertical",
itembottomshadowimagewidth:100,
backgroundimage:"",
autoplay:true,
arrowwidth:32,
pauseonmouseover:true,
navmode:"page",
interval:3000,
backgroundimagewidth:110,
navspacing:4,
playvideoimage:"playvideo-64-64-0.png",
visibleitems:2,
navswitchonmouseover:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:24,
loop:0,
transitionduration:1000
},
testimonial: {
width:360,
height:270,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:99,
donotcrop:false,
navheight:12,
random:false,
showhoveroverlay:true,
height:270,
arrowheight:32,
itembackgroundimagewidth:100,
skin:"testimonial",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
navstyle:"none",
enabletouchswipe:true,
backgroundimagetop:-40,
arrowstyle:"mouseover",
bottomshadowimagetop:95,
transitionduration:1000,
lightboxshowtitle:true,
hoveroverlayimage:"hoveroverlay-64-64-5.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
lightboxshowdescription:false,
width:360,
showitembottomshadow:false,
showhoveroverlayalways:false,
navimage:"bullet-12-12-1.png",
lightboxtitlebottomcss:"{color:#333; font-size:14px; font-family:Armata,sans-serif,Arial; overflow:hidden; text-align:left;}",
lightboxshownavigation:false,
showitembackgroundimage:false,
itembackgroundimage:"",
backgroundimagewidth:110,
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-2.png",
scrollitems:1,
showbottomshadow:false,
lightboxdescriptionbottomcss:"{color:#333; font-size:12px; font-family:Arial,Helvetica,sans-serif; overflow:hidden; text-align:left; margin:4px 0px 0px; padding: 0px;}",
supportiframe:false,
transitioneasing:"easeOutExpo",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:64,
showplayvideo:true,
spacing:4,
lightboxthumbwidth:80,
scrollmode:"item",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
lightboxthumbtopmargin:12,
arrowwidth:32,
transparent:false,
navmode:"page",
lightboxthumbbottommargin:8,
interval:2000,
lightboxthumbheight:60,
navspacing:4,
pauseonmouseover:false,
imagefillcolor:"FFFFFF",
playvideoimage:"playvideo-64-64-0.png",
visibleitems:1,
navswitchonmouseover:false,
direction:"horizontal",
usescreenquery:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:12,
loop:0,
autoplay:true
},
fashion: {
width:300,
height:300,
skinsfoldername:"",
arrowhideonmouseleave:1000,
itembottomshadowimagetop:100,
donotcrop:false,
navheight:16,
random:false,
showhoveroverlay:false,
height:300,
arrowheight:60,
itembackgroundimagewidth:100,
skin:"fashion",
responsive:true,
bottomshadowimage:"bottomshadow-110-95-0.png",
navstyle:"bullets",
enabletouchswipe:true,
backgroundimagetop:-40,
arrowstyle:"mouseover",
bottomshadowimagetop:95,
transitionduration:1000,
lightboxshowtitle:true,
hoveroverlayimage:"hoveroverlay-64-64-4.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
lightboxshowdescription:false,
width:300,
showitembottomshadow:false,
showhoveroverlayalways:false,
navimage:"bullet-16-16-1.png",
lightboxtitlebottomcss:"{color:#333; font-size:14px; font-family:Armata,sans-serif,Arial; overflow:hidden; text-align:left;}",
lightboxshownavigation:false,
showitembackgroundimage:false,
itembackgroundimage:"",
backgroundimagewidth:110,
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-42-60-0.png",
scrollitems:1,
showbottomshadow:false,
lightboxdescriptionbottomcss:"{color:#333; font-size:12px; font-family:Arial,Helvetica,sans-serif; overflow:hidden; text-align:left; margin:4px 0px 0px; padding: 0px;}",
supportiframe:false,
transitioneasing:"easeOutExpo",
itembackgroundimagetop:0,
showbackgroundimage:false,
lightboxbarheight:64,
showplayvideo:true,
spacing:0,
lightboxthumbwidth:80,
scrollmode:"page",
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
lightboxthumbtopmargin:12,
arrowwidth:42,
transparent:false,
navmode:"page",
lightboxthumbbottommargin:8,
interval:3000,
lightboxthumbheight:60,
navspacing:8,
pauseonmouseover:true,
imagefillcolor:"FFFFFF",
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
navswitchonmouseover:false,
direction:"horizontal",
usescreenquery:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
autoplay:true
},
rotator: {
width:200,
height:200,
skinsfoldername:"",
interval:3000,
itembottomshadowimagetop:100,
donotcrop:false,
random:false,
showhoveroverlay:true,
arrowheight:36,
showbottomshadow:false,
itembackgroundimagewidth:100,
skin:"Rotator",
responsive:true,
lightboxtitlebottomcss:"{color:#333; font-size:14px; font-family:Armata,sans-serif,Arial; overflow:hidden; text-align:left;}",
enabletouchswipe:true,
navstyle:"none",
backgroundimagetop:-40,
arrowstyle:"mouseover",
bottomshadowimagetop:100,
transitionduration:1000,
itembackgroundimagetop:0,
hoveroverlayimage:"hoveroverlay-64-64-9.png",
itembottomshadowimage:"itembottomshadow-100-100-5.png",
lightboxshowdescription:false,
navswitchonmouseover:false,
showhoveroverlayalways:false,
transitioneasing:"easeOutExpo",
lightboxshownavigation:false,
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-36-36-1.png",
scrollitems:1,
direction:"vertical",
lightboxdescriptionbottomcss:"{color:#333; font-size:12px; font-family:Arial,Helvetica,sans-serif; overflow:hidden; text-align:left; margin:4px 0px 0px; padding: 0px;}",
supportiframe:false,
navimage:"bullet-24-24-0.png",
backgroundimagewidth:110,
showbackgroundimage:false,
lightboxbarheight:64,
showplayvideo:true,
spacing:8,
lightboxthumbwidth:80,
navdirection:"vertical",
itembottomshadowimagewidth:100,
backgroundimage:"",
lightboxthumbtopmargin:12,
autoplay:true,
arrowwidth:36,
transparent:false,
bottomshadowimage:"bottomshadow-110-100-5.png",
scrollmode:"page",
navmode:"page",
lightboxshowtitle:true,
lightboxthumbbottommargin:8,
arrowhideonmouseleave:1000,
showitembottomshadow:false,
lightboxthumbheight:60,
navspacing:4,
pauseonmouseover:true,
imagefillcolor:"FFFFFF",
playvideoimage:"playvideo-64-64-0.png",
visibleitems:2,
usescreenquery:false,
bottomshadowimagewidth:110,
screenquery:'{\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:24,
loop:0,
navheight:24
},
testimonialcarousel: {
width:280,
height:240,
skinsfoldername:"",
interval:3000,
itembottomshadowimagetop:99,
donotcrop:false,
random:false,
showhoveroverlay:false,
arrowheight:32,
showbottomshadow:false,
itembackgroundimagewidth:100,
skin:"TestimonialCarousel",
responsive:true,
lightboxtitlebottomcss:"{color:#333; font-size:14px; font-family:Armata,sans-serif,Arial; overflow:hidden; text-align:left;}",
enabletouchswipe:true,
navstyle:"bullets",
backgroundimagetop:-40,
arrowstyle:"mouseover",
bottomshadowimagetop:95,
transitionduration:1000,
itembackgroundimagetop:0,
hoveroverlayimage:"hoveroverlay-64-64-9.png",
itembottomshadowimage:"itembottomshadow-100-98-3.png",
lightboxshowdescription:false,
navswitchonmouseover:false,
showhoveroverlayalways:false,
transitioneasing:"easeOutExpo",
lightboxshownavigation:false,
showitembackgroundimage:false,
itembackgroundimage:"",
playvideoimagepos:"center",
circular:true,
arrowimage:"arrows-32-32-2.png",
scrollitems:1,
direction:"horizontal",
lightboxdescriptionbottomcss:"{color:#333; font-size:12px; font-family:Arial,Helvetica,sans-serif; overflow:hidden; text-align:left; margin:4px 0px 0px; padding: 0px;}",
supportiframe:false,
navimage:"bullet-16-16-1.png",
backgroundimagewidth:110,
showbackgroundimage:false,
lightboxbarheight:64,
showplayvideo:true,
spacing:4,
lightboxthumbwidth:80,
navdirection:"horizontal",
itembottomshadowimagewidth:100,
backgroundimage:"",
lightboxthumbtopmargin:12,
autoplay:true,
arrowwidth:32,
transparent:false,
bottomshadowimage:"bottomshadow-110-95-0.png",
scrollmode:"page",
navmode:"page",
lightboxshowtitle:true,
lightboxthumbbottommargin:8,
arrowhideonmouseleave:600,
showitembottomshadow:false,
lightboxthumbheight:60,
navspacing:4,
pauseonmouseover:false,
imagefillcolor:"FFFFFF",
playvideoimage:"playvideo-64-64-0.png",
visibleitems:3,
usescreenquery:false,
bottomshadowimagewidth:110,
screenquery:'{\n "tablet": {\n "screenwidth": 900,\n "visibleitems": 2\n },\n "mobile": {\n "screenwidth": 600,\n "visibleitems": 1\n }\n}',
navwidth:16,
loop:0,
navheight:16
}
}; | gpl-2.0 |
Fe-Pi/linux | drivers/char/broadcom/vc_sm/vmcs_sm.c | 93463 | /*
****************************************************************************
* Copyright 2011-2012 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
****************************************************************************
*/
/* ---- Include Files ----------------------------------------------------- */
#include <linux/cdev.h>
#include <linux/broadcom/vc_mem.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pfn.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include "vchiq_connected.h"
#include "vc_vchi_sm.h"
#include <linux/broadcom/vmcs_sm_ioctl.h>
#include "vc_sm_knl.h"
/* ---- Private Constants and Types --------------------------------------- */
#define DEVICE_NAME "vcsm"
#define DRIVER_NAME "bcm2835-vcsm"
#define DEVICE_MINOR 0
#define VC_SM_DIR_ROOT_NAME "vc-smem"
#define VC_SM_DIR_ALLOC_NAME "alloc"
#define VC_SM_STATE "state"
#define VC_SM_STATS "statistics"
#define VC_SM_RESOURCES "resources"
#define VC_SM_DEBUG "debug"
#define VC_SM_WRITE_BUF_SIZE 128
/* Statistics tracked per resource and globally. */
enum sm_stats_t {
/* Attempt. */
ALLOC,
FREE,
LOCK,
UNLOCK,
MAP,
FLUSH,
INVALID,
IMPORT,
END_ATTEMPT,
/* Failure. */
ALLOC_FAIL,
FREE_FAIL,
LOCK_FAIL,
UNLOCK_FAIL,
MAP_FAIL,
FLUSH_FAIL,
INVALID_FAIL,
IMPORT_FAIL,
END_ALL,
};
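/*
 * The failure counters mirror the attempt counters one-for-one; since
 * ALLOC_FAIL == END_ATTEMPT + 1, the failure slot matching an attempt
 * counter "op" is op + END_ATTEMPT + 1, which is how the statistics dumps
 * below index them.
 */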
static const char *const sm_stats_human_read[] = {
"Alloc",
"Free",
"Lock",
"Unlock",
"Map",
"Cache Flush",
"Cache Invalidate",
"Import",
};
typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
struct sm_pde_t {
VC_SM_SHOW show; /* Debug fs function hookup. */
struct dentry *dir_entry; /* Debug fs directory entry. */
void *priv_data; /* Private data */
};
/* Single resource allocation tracked for all devices. */
struct sm_mmap {
struct list_head map_list; /* Linked list of maps. */
struct sm_resource_t *resource; /* Pointer to the resource. */
pid_t res_pid; /* PID owning that resource. */
unsigned int res_vc_hdl; /* Resource handle (videocore). */
unsigned int res_usr_hdl; /* Resource handle (user). */
unsigned long res_addr; /* Mapped virtual address. */
struct vm_area_struct *vma; /* VM area for this mapping. */
unsigned int ref_count; /* Reference count to this vma. */
/* Used to link maps associated with a resource. */
struct list_head resource_map_list;
};
/* Single resource allocation tracked for each opened device. */
struct sm_resource_t {
struct list_head resource_list; /* List of resources. */
struct list_head global_resource_list; /* Global list of resources. */
pid_t pid; /* PID owning that resource. */
uint32_t res_guid; /* Unique identifier. */
uint32_t lock_count; /* Lock count for this resource. */
uint32_t ref_count; /* Ref count for this resource. */
uint32_t res_handle; /* Resource allocation handle. */
void *res_base_mem; /* Resource base memory address. */
uint32_t res_size; /* Resource size allocated. */
enum vmcs_sm_cache_e res_cached; /* Resource cache type. */
struct sm_resource_t *res_shared; /* Shared resource */
enum sm_stats_t res_stats[END_ALL]; /* Resource statistics. */
uint8_t map_count; /* Counter of mappings for this resource. */
struct list_head map_list; /* Maps associated with a resource. */
/* DMABUF related fields */
struct dma_buf *dma_buf;
struct dma_buf_attachment *attach;
struct sg_table *sgt;
dma_addr_t dma_addr;
struct sm_priv_data_t *private;
bool map; /* whether to map pages up front */
};
/* Private file data associated with each opened device. */
struct sm_priv_data_t {
struct list_head resource_list; /* List of resources. */
pid_t pid; /* PID of creator. */
struct dentry *dir_pid; /* Debug fs entries root. */
struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
struct sm_pde_t dir_res; /* Debug fs resource sub-tree. */
int restart_sys; /* Tracks restart on interrupt. */
enum vc_sm_msg_type int_action; /* Interrupted action. */
uint32_t int_trans_id; /* Interrupted transaction. */
};
/* Global state information. */
struct sm_state_t {
struct platform_device *pdev;
struct sm_instance *sm_handle; /* Handle for videocore service. */
struct dentry *dir_root; /* Debug fs entries root. */
struct dentry *dir_alloc; /* Debug fs entries allocations. */
struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
struct dentry *debug; /* Debug fs entries debug. */
struct mutex map_lock; /* Global map lock. */
struct list_head map_list; /* List of maps. */
struct list_head resource_list; /* List of resources. */
enum sm_stats_t deceased[END_ALL]; /* Natural termination stats. */
enum sm_stats_t terminated[END_ALL]; /* Forced termination stats. */
uint32_t res_deceased_cnt; /* Natural termination counter. */
uint32_t res_terminated_cnt; /* Forced termination counter. */
struct cdev sm_cdev; /* Device. */
dev_t sm_devid; /* Device identifier. */
struct class *sm_class; /* Class. */
struct device *sm_dev; /* Device. */
struct sm_priv_data_t *data_knl; /* Kernel internal data tracking. */
struct mutex lock; /* Global lock. */
uint32_t guid; /* GUID (next) tracker. */
};
/* ---- Private Variables ----------------------------------------------- */
static struct sm_state_t *sm_state;
static int sm_inited;
#if 0
static const char *const sm_cache_map_vector[] = {
"(null)",
"host",
"videocore",
"host+videocore",
};
#endif
/* ---- Private Function Prototypes -------------------------------------- */
/* ---- Private Functions ------------------------------------------------ */
static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
{
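/*
 * vc_addr is a VideoCore bus address: masking with 0x3FFFFFFF strips the
 * bus-alias bits and leaves the offset into VideoCore memory; adding
 * mm_vc_mem_phys_addr (the ARM-physical base of that memory) and shifting
 * by PAGE_SHIFT turns it into a page frame number.
 */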
unsigned long pfn = vc_addr & 0x3FFFFFFF;
pfn += mm_vc_mem_phys_addr;
pfn >>= PAGE_SHIFT;
return pfn;
}
/*
* Carries over to the state statistics the statistics once owned by a deceased
* resource.
*/
static void vc_sm_resource_deceased(struct sm_resource_t *p_res, int terminated)
{
if (sm_state != NULL) {
if (p_res != NULL) {
int ix;
if (terminated)
sm_state->res_terminated_cnt++;
else
sm_state->res_deceased_cnt++;
for (ix = 0; ix < END_ALL; ix++) {
if (terminated)
sm_state->terminated[ix] +=
p_res->res_stats[ix];
else
sm_state->deceased[ix] +=
p_res->res_stats[ix];
}
}
}
}
/*
* Fetch a videocore handle corresponding to a mapping of the pid+address;
* returns 0 (i.e. NULL) if no such handle exists in the global map.
*/
static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
unsigned int addr)
{
struct sm_mmap *map = NULL;
unsigned int handle = 0;
if (!sm_state || addr == 0)
goto out;
mutex_lock(&(sm_state->map_lock));
/* Lookup the resource. */
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
if (map->res_pid != pid)
continue;
if (addr < map->res_addr ||
addr >= (map->res_addr + map->resource->res_size))
continue;
pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
__func__, map, map->res_pid, map->res_addr,
map->res_vc_hdl, map->res_usr_hdl);
handle = map->res_vc_hdl;
break;
}
}
mutex_unlock(&(sm_state->map_lock));
out:
/*
* Use a debug log here as it may be a valid situation that we query
* for something that is not mapped, we do not want a kernel log each
* time around.
*
* There are other error logs that would pop up accordingly if someone
* subsequently tries to use something invalid after being told not to
* use it...
*/
if (handle == 0) {
pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
__func__, pid, addr);
}
return handle;
}
/*
* Fetch a user handle corresponding to a mapping of the pid+address;
* returns 0 (i.e. NULL) if no such handle exists in the global map.
*/
static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
unsigned int addr)
{
struct sm_mmap *map = NULL;
unsigned int handle = 0;
if (!sm_state || addr == 0)
goto out;
mutex_lock(&(sm_state->map_lock));
/* Lookup the resource. */
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
if (map->res_pid != pid)
continue;
if (addr < map->res_addr ||
addr >= (map->res_addr + map->resource->res_size))
continue;
pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
__func__, map, map->res_pid, map->res_addr,
map->res_usr_hdl, map->res_vc_hdl);
handle = map->res_usr_hdl;
break;
}
}
mutex_unlock(&(sm_state->map_lock));
out:
/*
* Use a debug log here as it may be a valid situation that we query
* for something that is not mapped yet.
*
* There are other error logs that would pop up accordingly if someone
* subsequently tries to use something invalid after being told not to
* use it...
*/
if (handle == 0)
pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
__func__, pid, addr);
return handle;
}
#if defined(DO_NOT_USE)
/*
* Fetch an address corresponding to a mapping of the pid+handle;
* returns 0 (i.e. NULL) if no such address exists in the global map.
*/
static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
unsigned int hdl)
{
struct sm_mmap *map = NULL;
unsigned int addr = 0;
if (sm_state == NULL || hdl == 0)
goto out;
mutex_lock(&(sm_state->map_lock));
/* Lookup the resource. */
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
if (map->res_pid != pid || map->res_vc_hdl != hdl)
continue;
pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
__func__, map, map->res_pid, map->res_vc_hdl,
map->res_usr_hdl, map->res_addr);
addr = map->res_addr;
break;
}
}
mutex_unlock(&(sm_state->map_lock));
out:
/*
* Use a debug log here as it may be a valid situation that we query
* for something that is not mapped, we do not want a kernel log each
* time around.
*
* There are other error logs that would pop up accordingly if someone
* subsequently tries to use something invalid after being told not to
* use it...
*/
if (addr == 0)
pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
__func__, pid, hdl);
return addr;
}
#endif
/*
* Fetch an address corresponding to a mapping of the pid+handle;
* returns 0 (i.e. NULL) if no such address exists in the global map.
*/
static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
pid,
unsigned int
hdl)
{
struct sm_mmap *map = NULL;
unsigned int addr = 0;
if (sm_state == NULL || hdl == 0)
goto out;
mutex_lock(&(sm_state->map_lock));
/* Lookup the resource. */
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
if (map->res_pid != pid || map->res_usr_hdl != hdl)
continue;
pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
__func__, map, map->res_pid, map->res_vc_hdl,
map->res_usr_hdl, map->res_addr);
addr = map->res_addr;
break;
}
}
mutex_unlock(&(sm_state->map_lock));
out:
/*
* Use a debug log here as it may be a valid situation that we query
* for something that is not mapped, we do not want a kernel log each
* time around.
*
* There are other error logs that would pop up accordingly if someone
* subsequently tries to use something invalid after being told not to
* use it...
*/
if (addr == 0)
pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
pid, hdl);
return addr;
}
/* Adds a resource mapping to the global data list. */
static void vmcs_sm_add_map(struct sm_state_t *state,
struct sm_resource_t *resource, struct sm_mmap *map)
{
mutex_lock(&(state->map_lock));
/* Add to the global list of mappings */
list_add(&map->map_list, &state->map_list);
/* Add to the list of mappings for this resource */
list_add(&map->resource_map_list, &resource->map_list);
resource->map_count++;
mutex_unlock(&(state->map_lock));
pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
__func__, map, map->res_pid, map->res_vc_hdl,
map->res_usr_hdl, map->res_addr);
}
/* Removes a resource mapping from the global data list. */
static void vmcs_sm_remove_map(struct sm_state_t *state,
struct sm_resource_t *resource,
struct sm_mmap *map)
{
mutex_lock(&(state->map_lock));
/* Remove from the global list of mappings */
list_del(&map->map_list);
/* Remove from the list of mapping for this resource */
list_del(&map->resource_map_list);
if (resource->map_count > 0)
resource->map_count--;
mutex_unlock(&(state->map_lock));
pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
__func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
map->res_addr);
kfree(map);
}
/* Read callback for the global state proc entry. */
static int vc_sm_global_state_show(struct seq_file *s, void *v)
{
struct sm_mmap *map = NULL;
struct sm_resource_t *resource = NULL;
int map_count = 0;
int resource_count = 0;
if (sm_state == NULL)
return 0;
seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
(unsigned int)sm_state->sm_handle);
/* Log all applicable mapping(s). */
mutex_lock(&(sm_state->map_lock));
seq_puts(s, "\nResources\n");
if (!list_empty(&sm_state->resource_list)) {
list_for_each_entry(resource, &sm_state->resource_list,
global_resource_list) {
resource_count++;
seq_printf(s, "\nResource %p\n",
resource);
seq_printf(s, " PID %u\n",
resource->pid);
seq_printf(s, " RES_GUID 0x%x\n",
resource->res_guid);
seq_printf(s, " LOCK_COUNT %u\n",
resource->lock_count);
seq_printf(s, " REF_COUNT %u\n",
resource->ref_count);
seq_printf(s, " res_handle 0x%X\n",
resource->res_handle);
seq_printf(s, " res_base_mem %p\n",
resource->res_base_mem);
seq_printf(s, " SIZE %d\n",
resource->res_size);
seq_printf(s, " DMABUF %p\n",
resource->dma_buf);
seq_printf(s, " ATTACH %p\n",
resource->attach);
seq_printf(s, " SGT %p\n",
resource->sgt);
seq_printf(s, " DMA_ADDR %pad\n",
&resource->dma_addr);
}
}
seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
seq_puts(s, "\nMappings\n");
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
map_count++;
seq_printf(s, "\nMapping 0x%x\n",
(unsigned int)map);
seq_printf(s, " TGID %u\n",
map->res_pid);
seq_printf(s, " VC-HDL 0x%x\n",
map->res_vc_hdl);
seq_printf(s, " USR-HDL 0x%x\n",
map->res_usr_hdl);
seq_printf(s, " USR-ADDR 0x%lx\n",
map->res_addr);
seq_printf(s, " SIZE %d\n",
map->resource->res_size);
}
}
mutex_unlock(&(sm_state->map_lock));
seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);
return 0;
}
static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
{
int ix;
/* Global state tracked statistics. */
if (sm_state != NULL) {
seq_puts(s, "\nDeceased Resources Statistics\n");
seq_printf(s, "\nNatural Cause (%u occurences)\n",
sm_state->res_deceased_cnt);
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (sm_state->deceased[ix] > 0) {
seq_printf(s, " %u\t%s\n",
sm_state->deceased[ix],
sm_stats_human_read[ix]);
}
}
seq_puts(s, "\n");
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (sm_state->deceased[ix + END_ATTEMPT + 1] > 0) {
seq_printf(s, " %u\tFAILED %s\n",
sm_state->deceased[ix + END_ATTEMPT + 1],
sm_stats_human_read[ix]);
}
}
seq_printf(s, "\nForcefull (%u occurences)\n",
sm_state->res_terminated_cnt);
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (sm_state->terminated[ix] > 0) {
seq_printf(s, " %u\t%s\n",
sm_state->terminated[ix],
sm_stats_human_read[ix]);
}
}
seq_puts(s, "\n");
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (sm_state->terminated[ix + END_ATTEMPT + 1] > 0) {
seq_printf(s, " %u\tFAILED %s\n",
sm_state->terminated[ix + END_ATTEMPT + 1],
sm_stats_human_read[ix]);
}
}
}
return 0;
}
#if 0
/* Read callback for the statistics proc entry. */
static int vc_sm_statistics_show(struct seq_file *s, void *v)
{
int ix;
struct sm_priv_data_t *file_data;
struct sm_resource_t *resource;
int res_count = 0;
struct sm_pde_t *p_pde;
p_pde = (struct sm_pde_t *)(s->private);
file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
if (file_data == NULL)
return 0;
/* Per process statistics. */
seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);
mutex_lock(&(sm_state->map_lock));
if (!list_empty(&file_data->resource_list)) {
list_for_each_entry(resource, &file_data->resource_list,
resource_list) {
res_count++;
seq_printf(s, "\nGUID: 0x%x\n\n",
resource->res_guid);
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (resource->res_stats[ix] > 0) {
seq_printf(s,
" %u\t%s\n",
resource->res_stats[ix],
sm_stats_human_read[ix]);
}
}
seq_puts(s, "\n");
for (ix = 0; ix < END_ATTEMPT; ix++) {
if (resource->res_stats[ix + END_ATTEMPT + 1] > 0) {
seq_printf(s,
" %u\tFAILED %s\n",
resource->res_stats[ix + END_ATTEMPT + 1],
sm_stats_human_read[ix]);
}
}
}
}
mutex_unlock(&(sm_state->map_lock));
seq_printf(s, "\nResources Count %d\n", res_count);
return 0;
}
#endif
#if 0
/* Read callback for the allocation proc entry. */
static int vc_sm_alloc_show(struct seq_file *s, void *v)
{
struct sm_priv_data_t *file_data;
struct sm_resource_t *resource;
int alloc_count = 0;
struct sm_pde_t *p_pde;
p_pde = (struct sm_pde_t *)(s->private);
file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
if (!file_data)
return 0;
/* Per process statistics. */
seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);
mutex_lock(&(sm_state->map_lock));
if (!list_empty(&file_data->resource_list)) {
list_for_each_entry(resource, &file_data->resource_list,
resource_list) {
alloc_count++;
seq_printf(s, "\nGUID: 0x%x\n",
resource->res_guid);
seq_printf(s, "Lock Count: %u\n",
resource->lock_count);
seq_printf(s, "Mapped: %s\n",
(resource->map_count ? "yes" : "no"));
seq_printf(s, "VC-handle: 0x%x\n",
resource->res_handle);
seq_printf(s, "VC-address: 0x%p\n",
resource->res_base_mem);
seq_printf(s, "VC-size (bytes): %u\n",
resource->res_size);
seq_printf(s, "Cache: %s\n",
sm_cache_map_vector[resource->res_cached]);
}
}
mutex_unlock(&(sm_state->map_lock));
seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);
return 0;
}
#endif
static int vc_sm_seq_file_show(struct seq_file *s, void *v)
{
struct sm_pde_t *sm_pde;
sm_pde = (struct sm_pde_t *)(s->private);
if (sm_pde && sm_pde->show)
sm_pde->show(s, v);
return 0;
}
static int vc_sm_single_open(struct inode *inode, struct file *file)
{
return single_open(file, vc_sm_seq_file_show, inode->i_private);
}
static const struct file_operations vc_sm_debug_fs_fops = {
.open = vc_sm_single_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
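/*
 * The pointer handed to debugfs_create_file() as "data" ends up in
 * inode->i_private; vc_sm_single_open() forwards it into the seq_file
 * private field, and vc_sm_seq_file_show() dispatches to the ->show hook of
 * the struct sm_pde_t stored there.
 */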
/*
* Adds a resource to the private data list which tracks all the allocated
* data.
*/
static void vmcs_sm_add_resource(struct sm_priv_data_t *privdata,
struct sm_resource_t *resource)
{
mutex_lock(&(sm_state->map_lock));
list_add(&resource->resource_list, &privdata->resource_list);
list_add(&resource->global_resource_list, &sm_state->resource_list);
mutex_unlock(&(sm_state->map_lock));
pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
__func__, resource, resource->res_base_mem,
resource->res_handle, resource->res_size, resource->res_cached);
}
/*
* Locates a resource and acquire a reference on it.
* The resource won't be deleted while there is a reference on it.
*/
static struct sm_resource_t *vmcs_sm_acquire_resource(struct sm_priv_data_t
*private,
unsigned int res_guid)
{
struct sm_resource_t *resource, *ret = NULL;
mutex_lock(&(sm_state->map_lock));
list_for_each_entry(resource, &private->resource_list, resource_list) {
if (resource->res_guid != res_guid)
continue;
pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
__func__, resource, resource->res_guid,
resource->res_base_mem, resource->res_handle,
resource->res_size, resource->res_cached);
resource->ref_count++;
ret = resource;
break;
}
mutex_unlock(&(sm_state->map_lock));
return ret;
}
/*
* Locates a resource and acquire a reference on it.
* The resource won't be deleted while there is a reference on it.
*/
static struct sm_resource_t *vmcs_sm_acquire_first_resource(
struct sm_priv_data_t *private)
{
struct sm_resource_t *resource, *ret = NULL;
mutex_lock(&(sm_state->map_lock));
list_for_each_entry(resource, &private->resource_list, resource_list) {
pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
__func__, resource, resource->res_guid,
resource->res_base_mem, resource->res_handle,
resource->res_size, resource->res_cached);
resource->ref_count++;
ret = resource;
break;
}
mutex_unlock(&(sm_state->map_lock));
return ret;
}
/*
* Locates a resource and acquire a reference on it.
* The resource won't be deleted while there is a reference on it.
*/
static struct sm_resource_t *vmcs_sm_acquire_global_resource(unsigned int
res_guid)
{
struct sm_resource_t *resource, *ret = NULL;
mutex_lock(&(sm_state->map_lock));
list_for_each_entry(resource, &sm_state->resource_list,
global_resource_list) {
if (resource->res_guid != res_guid)
continue;
pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
__func__, resource, resource->res_guid,
resource->res_base_mem, resource->res_handle,
resource->res_size, resource->res_cached);
resource->ref_count++;
ret = resource;
break;
}
mutex_unlock(&(sm_state->map_lock));
return ret;
}
/*
* Release a previously acquired resource.
* The resource will be deleted when its refcount reaches 0.
*/
static void vmcs_sm_release_resource(struct sm_resource_t *resource, int force)
{
struct sm_priv_data_t *private = resource->private;
struct sm_mmap *map, *map_tmp;
struct sm_resource_t *res_tmp;
int ret;
mutex_lock(&(sm_state->map_lock));
if (--resource->ref_count) {
if (force)
pr_err("[%s]: resource %p in use\n", __func__, resource);
mutex_unlock(&(sm_state->map_lock));
return;
}
/* Time to free the resource. Start by removing it from the list */
list_del(&resource->resource_list);
list_del(&resource->global_resource_list);
/*
* Walk the global resource list, find out if the resource is used
* somewhere else. In which case we don't want to delete it.
*/
list_for_each_entry(res_tmp, &sm_state->resource_list,
global_resource_list) {
if (res_tmp->res_handle == resource->res_handle) {
resource->res_handle = 0;
break;
}
}
mutex_unlock(&(sm_state->map_lock));
pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
__func__, resource->res_guid, resource->res_handle,
resource->res_base_mem);
resource->res_stats[FREE]++;
/* Make sure the resource we're removing is unmapped first */
if (resource->map_count && !list_empty(&resource->map_list)) {
down_write(¤t->mm->mmap_sem);
list_for_each_entry_safe(map, map_tmp, &resource->map_list,
resource_map_list) {
ret =
do_munmap(current->mm, map->res_addr,
resource->res_size, NULL);
if (ret) {
pr_err("[%s]: could not unmap resource %p\n",
__func__, resource);
}
}
up_write(¤t->mm->mmap_sem);
}
/* Free up the videocore allocated resource. */
if (resource->res_handle) {
struct vc_sm_free_t free = {
resource->res_handle, (uint32_t)resource->res_base_mem
};
int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
&private->int_trans_id);
if (status != 0 && status != -EINTR) {
pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
resource->res_stats[FREE_FAIL]++;
ret = -EPERM;
}
}
if (resource->sgt)
dma_buf_unmap_attachment(resource->attach, resource->sgt,
DMA_BIDIRECTIONAL);
if (resource->attach)
dma_buf_detach(resource->dma_buf, resource->attach);
if (resource->dma_buf)
dma_buf_put(resource->dma_buf);
/* Free up the shared resource. */
if (resource->res_shared)
vmcs_sm_release_resource(resource->res_shared, 0);
/* Free up the local resource tracking this allocation. */
vc_sm_resource_deceased(resource, force);
kfree(resource);
}
/*
* Dump the map table for the driver. If process is -1, dumps the whole table,
* if process is a valid pid (non -1) dump only the entries associated with the
* pid of interest.
*/
static void vmcs_sm_host_walk_map_per_pid(int pid)
{
struct sm_mmap *map = NULL;
/* Make sure the device was started properly. */
if (sm_state == NULL) {
pr_err("[%s]: invalid device\n", __func__);
return;
}
mutex_lock(&(sm_state->map_lock));
/* Log all applicable mapping(s). */
if (!list_empty(&sm_state->map_list)) {
list_for_each_entry(map, &sm_state->map_list, map_list) {
if (pid == -1 || map->res_pid == pid) {
pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
__func__, map->res_pid, map->res_vc_hdl,
map->res_usr_hdl, map->res_addr);
}
}
}
mutex_unlock(&(sm_state->map_lock));
}
/*
* Dump the allocation table from host side point of view. This only dumps the
* data allocated for this process/device referenced by the file_data.
*/
static void vmcs_sm_host_walk_alloc(struct sm_priv_data_t *file_data)
{
struct sm_resource_t *resource = NULL;
/* Make sure the device was started properly. */
if ((sm_state == NULL) || (file_data == NULL)) {
pr_err("[%s]: invalid device\n", __func__);
return;
}
mutex_lock(&(sm_state->map_lock));
if (!list_empty(&file_data->resource_list)) {
list_for_each_entry(resource, &file_data->resource_list,
resource_list) {
pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
__func__, resource->res_guid, resource->res_handle,
resource->res_base_mem, resource->res_size,
resource->res_cached);
}
}
mutex_unlock(&(sm_state->map_lock));
}
/* Create support for private data tracking. */
static struct sm_priv_data_t *vc_sm_create_priv_data(pid_t id)
{
char alloc_name[32];
struct sm_priv_data_t *file_data = NULL;
/* Allocate private structure. */
file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
if (!file_data) {
pr_err("[%s]: cannot allocate file data\n", __func__);
goto out;
}
snprintf(alloc_name, sizeof(alloc_name), "%d", id);
INIT_LIST_HEAD(&file_data->resource_list);
file_data->pid = id;
file_data->dir_pid = debugfs_create_dir(alloc_name,
sm_state->dir_alloc);
#if 0
/* TODO: fix this to support querying statistics per pid */
if (IS_ERR_OR_NULL(file_data->dir_pid)) {
file_data->dir_pid = NULL;
} else {
struct dentry *dir_entry;
dir_entry = debugfs_create_file(VC_SM_RESOURCES, 0444,
file_data->dir_pid, file_data,
&vc_sm_debug_fs_fops);
file_data->dir_res.dir_entry = dir_entry;
file_data->dir_res.priv_data = file_data;
file_data->dir_res.show = &vc_sm_alloc_show;
dir_entry = debugfs_create_file(VC_SM_STATS, 0444,
file_data->dir_pid, file_data,
&vc_sm_debug_fs_fops);
file_data->dir_res.dir_entry = dir_entry;
file_data->dir_res.priv_data = file_data;
file_data->dir_res.show = &vc_sm_statistics_show;
}
pr_debug("[%s]: private data allocated %p\n", __func__, file_data);
#endif
out:
return file_data;
}
/*
* Open the device. Creates a private state to help track all allocation
* associated with this device.
*/
static int vc_sm_open(struct inode *inode, struct file *file)
{
int ret = 0;
/* Make sure the device was started properly. */
if (!sm_state) {
pr_err("[%s]: invalid device\n", __func__);
ret = -EPERM;
goto out;
}
file->private_data = vc_sm_create_priv_data(current->tgid);
if (file->private_data == NULL) {
pr_err("[%s]: failed to create data tracker\n", __func__);
ret = -ENOMEM;
goto out;
}
out:
return ret;
}
/*
* Close the device. Free up all resources still associated with this device
* at the time.
*/
static int vc_sm_release(struct inode *inode, struct file *file)
{
struct sm_priv_data_t *file_data =
(struct sm_priv_data_t *)file->private_data;
struct sm_resource_t *resource;
int ret = 0;
/* Make sure the device was started properly. */
if (sm_state == NULL || file_data == NULL) {
pr_err("[%s]: invalid device\n", __func__);
ret = -EPERM;
goto out;
}
pr_debug("[%s]: using private data %p\n", __func__, file_data);
if (file_data->restart_sys == -EINTR) {
struct vc_sm_action_clean_t action_clean;
pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
__func__, file_data->int_action,
file_data->int_trans_id);
action_clean.res_action = file_data->int_action;
action_clean.action_trans_id = file_data->int_trans_id;
vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
}
while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
vmcs_sm_release_resource(resource, 0);
vmcs_sm_release_resource(resource, 1);
}
/* Remove the corresponding proc entry. */
debugfs_remove_recursive(file_data->dir_pid);
/* Terminate the private data. */
kfree(file_data);
out:
return ret;
}
static void vcsm_vma_open(struct vm_area_struct *vma)
{
struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
__func__, vma->vm_start, vma->vm_end, (int)current->tgid,
(int)vma->vm_pgoff);
map->ref_count++;
}
static void vcsm_vma_close(struct vm_area_struct *vma)
{
struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
__func__, vma->vm_start, vma->vm_end, (int)current->tgid,
(int)vma->vm_pgoff);
map->ref_count--;
/* Remove from the map table. */
if (map->ref_count == 0)
vmcs_sm_remove_map(sm_state, map->resource, map);
}
static int vcsm_vma_fault(struct vm_fault *vmf)
{
struct sm_mmap *map = (struct sm_mmap *)vmf->vma->vm_private_data;
struct sm_resource_t *resource = map->resource;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
/* Lock the resource if necessary. */
if (!resource->lock_count) {
struct vc_sm_lock_unlock_t lock_unlock;
struct vc_sm_lock_result_t lock_result;
int status;
lock_unlock.res_handle = resource->res_handle;
lock_unlock.res_mem = (uint32_t)resource->res_base_mem;
pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
__func__, lock_unlock.res_handle,
(void *)lock_unlock.res_mem);
/* Lock the videocore allocated resource. */
status = vc_vchi_sm_lock(sm_state->sm_handle,
&lock_unlock, &lock_result, 0);
if (status || !lock_result.res_mem) {
pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
__func__, status);
resource->res_stats[LOCK_FAIL]++;
return VM_FAULT_SIGBUS;
}
pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
outer_inv_range(__pfn_to_phys(pfn),
__pfn_to_phys(pfn) + resource->res_size);
resource->res_stats[LOCK]++;
resource->lock_count++;
/* Keep track of the new base memory. */
if (lock_result.res_mem &&
lock_result.res_old_mem &&
(lock_result.res_mem != lock_result.res_old_mem)) {
resource->res_base_mem = (void *)lock_result.res_mem;
}
}
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->address - vmf->vma->vm_start);
pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
pfn += mm_vc_mem_phys_addr;
pfn += page_offset;
pfn >>= PAGE_SHIFT;
/* Finally, remap it */
ret = vm_insert_pfn(vmf->vma, (unsigned long)vmf->address, pfn);
switch (ret) {
case 0:
case -ERESTARTSYS:
/*
* EBUSY is ok: this just means that another thread
* already did the job.
*/
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
case -EAGAIN:
pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
pfn, (unsigned long)vmf->address, ret);
return VM_FAULT_OOM;
default:
pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
pfn, (unsigned long)vmf->address, ret);
return VM_FAULT_SIGBUS;
}
}
static const struct vm_operations_struct vcsm_vm_ops = {
.open = vcsm_vma_open,
.close = vcsm_vma_close,
.fault = vcsm_vma_fault,
};
/* Converts VCSM_CACHE_OP_* to an operating function. */
static void (*cache_op_to_func(const unsigned cache_op))
(const void*, const void*)
{
switch (cache_op) {
case VCSM_CACHE_OP_NOP:
return NULL;
case VCSM_CACHE_OP_INV:
return dmac_inv_range;
case VCSM_CACHE_OP_CLEAN:
return dmac_clean_range;
case VCSM_CACHE_OP_FLUSH:
return dmac_flush_range;
default:
pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
return NULL;
}
}
/*
* Clean/invalidate/flush the cache of a buffer that is already pinned (i.e. accessed).
*/
static int clean_invalid_contiguous_mem_2d(const void __user *addr,
const size_t block_count, const size_t block_size, const size_t stride,
const unsigned cache_op)
{
size_t i;
void (*op_fn)(const void*, const void*);
if (!block_size) {
pr_err("[%s]: size cannot be 0\n", __func__);
return -EINVAL;
}
op_fn = cache_op_to_func(cache_op);
if (op_fn == NULL)
return -EINVAL;
for (i = 0; i < block_count; i++, addr += stride)
op_fn(addr, addr + block_size);
return 0;
}
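/*
 * Usage sketch (the buffer layout numbers are made up): to clean 16 rows of
 * 640 RGBA pixels in an image whose rows are padded to a 4096-byte pitch, a
 * caller could do
 *
 *   clean_invalid_contiguous_mem_2d(buf, 16, 640 * 4, 4096,
 *                                   VCSM_CACHE_OP_CLEAN);
 *
 * i.e. block_count is the number of rows, block_size the bytes cleaned per
 * row, and stride the distance between row starts.
 */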
/* Clean/invalidate/flush the cache of a buffer that may not be pinned. */
/* The caller must lock current->mm->mmap_sem for read. */
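/* The walk below only touches PTEs that are present, so unmapped or
 * swapped-out pages are skipped rather than faulted in.
 */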
static int clean_invalid_mem_walk(unsigned long addr, const size_t size,
const unsigned cache_op)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long pgd_next, pud_next, pmd_next;
const unsigned long end = ALIGN(addr + size, PAGE_SIZE);
void (*op_fn)(const void*, const void*);
addr &= PAGE_MASK;
if (addr >= end)
return 0;
op_fn = cache_op_to_func(cache_op);
if (op_fn == NULL)
return -EINVAL;
/* Walk PGD */
pgd = pgd_offset(current->mm, addr);
do {
pgd_next = pgd_addr_end(addr, end);
if (pgd_none(*pgd) || pgd_bad(*pgd))
continue;
/* Walk PUD */
pud = pud_offset(pgd, addr);
do {
pud_next = pud_addr_end(addr, pgd_next);
if (pud_none(*pud) || pud_bad(*pud))
continue;
/* Walk PMD */
pmd = pmd_offset(pud, addr);
do {
pmd_next = pmd_addr_end(addr, pud_next);
if (pmd_none(*pmd) || pmd_bad(*pmd))
continue;
/* Walk PTE */
pte = pte_offset_map(pmd, addr);
do {
if (pte_none(*pte) || !pte_present(*pte))
continue;
op_fn((const void __user*) addr,
(const void __user*) (addr + PAGE_SIZE));
} while (pte++, addr += PAGE_SIZE, addr != pmd_next);
pte_unmap(pte);
} while (pmd++, addr = pmd_next, addr != pud_next);
} while (pud++, addr = pud_next, addr != pgd_next);
} while (pgd++, addr = pgd_next, addr != end);
return 0;
}
/* Clean/invalidate/flush the cache of a buffer within a resource */
static int clean_invalid_resource_walk(const void __user *addr,
const size_t size, const unsigned cache_op, const int usr_hdl,
struct sm_resource_t *resource)
{
int err;
enum sm_stats_t stat_attempt, stat_failure;
void __user *res_addr;
if (resource == NULL) {
pr_err("[%s]: resource is NULL\n", __func__);
return -EINVAL;
}
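/*
 * Only host-cached allocations (HOST or BOTH) can hold stale lines in the CPU
 * cache; VideoCore-only or uncached mappings need no CPU cache maintenance,
 * hence the early return below.
 */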
if (resource->res_cached != VMCS_SM_CACHE_HOST &&
resource->res_cached != VMCS_SM_CACHE_BOTH)
return 0;
switch (cache_op) {
case VCSM_CACHE_OP_NOP:
return 0;
case VCSM_CACHE_OP_INV:
stat_attempt = INVALID;
stat_failure = INVALID_FAIL;
break;
case VCSM_CACHE_OP_CLEAN:
/* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
stat_attempt = FLUSH;
stat_failure = FLUSH_FAIL;
break;
case VCSM_CACHE_OP_FLUSH:
stat_attempt = FLUSH;
stat_failure = FLUSH_FAIL;
break;
default:
pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
return -EINVAL;
}
resource->res_stats[stat_attempt]++;
if (size > resource->res_size) {
pr_err("[%s]: size (0x%08zx) is larger than res_size (0x%08zx)\n",
__func__, size, resource->res_size);
return -EFAULT;
}
res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle(
current->tgid, usr_hdl);
if (res_addr == NULL) {
pr_err("[%s]: Failed to get user address "
"from pid (%d) and user handle (%d)\n", __func__, current->tgid,
usr_hdl);
return -EINVAL;
}
if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
__func__, addr, addr + size, res_addr,
res_addr + resource->res_size);
return -EFAULT;
}
down_read(&current->mm->mmap_sem);
err = clean_invalid_mem_walk((unsigned long) addr, size, cache_op);
up_read(&current->mm->mmap_sem);
if (err)
resource->res_stats[stat_failure]++;
return err;
}
/* Map allocated data into user space. */
static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = 0;
struct sm_priv_data_t *file_data =
(struct sm_priv_data_t *)file->private_data;
struct sm_resource_t *resource = NULL;
struct sm_mmap *map = NULL;
/* Make sure the device was started properly. */
if ((sm_state == NULL) || (file_data == NULL)) {
pr_err("[%s]: invalid device\n", __func__);
return -EPERM;
}
pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
/*
* We lookup to make sure that the data we are being asked to mmap is
* something that we allocated.
*
* We use the offset information as the key to tell us which resource
* we are mapping.
*/
resource = vmcs_sm_acquire_resource(file_data,
((unsigned int)vma->vm_pgoff <<
PAGE_SHIFT));
if (resource == NULL) {
pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
return -ENOMEM;
}
pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
__func__, resource->res_guid, current->tgid, resource->pid,
file_data->pid);
/* Check permissions. */
if (resource->pid && (resource->pid != current->tgid)) {
pr_err("[%s]: current tgid %u != %u owner\n",
__func__, current->tgid, resource->pid);
ret = -EPERM;
goto error;
}
/* Verify that what we are asked to mmap is proper. */
if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
__func__,
resource->res_size,
(unsigned int)(vma->vm_end - vma->vm_start));
ret = -EINVAL;
goto error;
}
/*
* Keep track of the tuple in the global resource list such that one
* can do a mapping lookup for address/memory handle.
*/
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
pr_err("[%s]: failed to allocate global tracking resource\n",
__func__);
ret = -ENOMEM;
goto error;
}
map->res_pid = current->tgid;
map->res_vc_hdl = resource->res_handle;
map->res_usr_hdl = resource->res_guid;
map->res_addr = (unsigned long)vma->vm_start;
map->resource = resource;
map->vma = vma;
vmcs_sm_add_map(sm_state, resource, map);
/*
* We are not actually mapping the pages, we just provide a fault
* handler to allow pages to be mapped when accessed
*/
vma->vm_flags |=
VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
vma->vm_ops = &vcsm_vm_ops;
vma->vm_private_data = map;
/* vm_pgoff is the first PFN of the mapped memory */
vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
vma->vm_pgoff += mm_vc_mem_phys_addr;
vma->vm_pgoff >>= PAGE_SHIFT;
if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
(resource->res_cached == VMCS_SM_CACHE_VC)) {
/* Allocated non host cached memory, honour it. */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
__func__,
resource, resource->res_guid, resource->lock_count,
resource->res_base_mem, resource->res_handle,
resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
resource->res_cached);
pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
__func__, resource, resource->res_base_mem,
resource->res_handle, resource->map_count,
(unsigned int)vma->vm_start);
vcsm_vma_open(vma);
resource->res_stats[MAP]++;
vmcs_sm_release_resource(resource, 0);
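/*
 * resource->map is set when user space OR'ed the 0x80 "map up front" flag
 * into the cached field at allocation time; in that case every page is
 * inserted eagerly here instead of being populated lazily by
 * vcsm_vma_fault().
 */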
if (resource->map) {
/* Pre-fault: walk the VMA and insert the PFN for every page now. */
unsigned long addr;
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
/* Finally, remap it */
unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
pfn += mm_vc_mem_phys_addr;
pfn += addr - vma->vm_start;
pfn >>= PAGE_SHIFT;
ret = vm_insert_pfn(vma, addr, pfn);
}
}
return 0;
error:
resource->res_stats[MAP_FAIL]++;
vmcs_sm_release_resource(resource, 0);
return ret;
}
/* Allocate a shared memory handle and block. */
static int vc_sm_ioctl_alloc(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_alloc *ioparam)
{
int ret = 0;
int status;
struct sm_resource_t *resource;
struct vc_sm_alloc_t alloc = { 0 };
struct vc_sm_alloc_result_t result = { 0 };
enum vmcs_sm_cache_e cached = ioparam->cached;
bool map = false;
/* Flag requesting that the buffer be mapped up front rather than lazily. */
if (cached & 0x80) {
map = true;
cached &= ~0x80;
}
/* Setup our allocation parameters */
alloc.type = ((cached == VMCS_SM_CACHE_VC)
|| (cached ==
VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
VC_SM_ALLOC_NON_CACHED;
alloc.base_unit = ioparam->size;
alloc.num_unit = ioparam->num;
alloc.allocator = current->tgid;
/* Align to kernel page size */
alloc.alignement = 4096;
/* Align the size to the kernel page size */
alloc.base_unit =
(alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
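/* e.g. with alloc.alignement = 4096, a 4097-byte request rounds up to 8192 bytes. */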
if (*ioparam->name) {
memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
} else {
memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
}
pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignement %u\n",
__func__, alloc.name, alloc.type, ioparam->size,
alloc.base_unit, alloc.num_unit, alloc.alignement);
/* Allocate local resource to track this allocation. */
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource) {
ret = -ENOMEM;
goto error;
}
INIT_LIST_HEAD(&resource->map_list);
resource->ref_count++;
resource->pid = current->tgid;
/* Allocate the videocore resource. */
status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
&private->int_trans_id);
if (status == -EINTR) {
pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
__func__, private->int_trans_id);
ret = -ERESTARTSYS;
private->restart_sys = -EINTR;
private->int_action = VC_SM_MSG_TYPE_ALLOC;
goto error;
} else if (status != 0 || !result.res_mem) {
pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
ret = -ENOMEM;
resource->res_stats[ALLOC_FAIL]++;
goto error;
}
/* Keep track of the resource we created. */
resource->private = private;
resource->res_handle = result.res_handle;
resource->res_base_mem = (void *)result.res_mem;
resource->res_size = alloc.base_unit * alloc.num_unit;
resource->res_cached = cached;
resource->map = map;
/*
* Kernel/user GUID. This global identifier is used for mmap'ing the
* allocated region from user space, it is passed as the mmap'ing
* offset, we use it to 'hide' the videocore handle/address.
*/
mutex_lock(&sm_state->lock);
resource->res_guid = ++sm_state->guid;
mutex_unlock(&sm_state->lock);
resource->res_guid <<= PAGE_SHIFT;
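/*
 * Shifting the GUID by PAGE_SHIFT makes it a page-aligned value that user
 * space can pass straight back as the mmap() offset. Illustrative flow,
 * with hypothetical wrapper/macro names:
 *
 *   ioctl(fd, VMCS_SM_CMD_ALLOC, &alloc);       // alloc.handle is the GUID
 *   p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *            MAP_SHARED, fd, alloc.handle);     // GUID doubles as the offset
 *
 * vc_sm_mmap() then recovers the GUID from vma->vm_pgoff << PAGE_SHIFT.
 */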
vmcs_sm_add_resource(private, resource);
pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
__func__, resource->res_guid, resource->res_handle,
resource->res_base_mem, resource->res_size,
resource->res_cached);
/* We're done */
resource->res_stats[ALLOC]++;
ioparam->handle = resource->res_guid;
return 0;
error:
pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
__func__, alloc.name, ret, alloc.type, ioparam->size,
alloc.base_unit, alloc.num_unit, alloc.alignement);
if (resource != NULL) {
vc_sm_resource_deceased(resource, 1);
kfree(resource);
}
return ret;
}
/* Share an allocated memory handle and block. */
static int vc_sm_ioctl_alloc_share(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_alloc_share *ioparam)
{
struct sm_resource_t *resource, *shared_resource;
int ret = 0;
pr_debug("[%s]: attempt to share resource %u\n", __func__,
ioparam->handle);
shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
if (shared_resource == NULL) {
ret = -ENOMEM;
goto error;
}
/* Allocate local resource to track this allocation. */
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (resource == NULL) {
pr_err("[%s]: failed to allocate local tracking resource\n",
__func__);
ret = -ENOMEM;
goto error;
}
INIT_LIST_HEAD(&resource->map_list);
resource->ref_count++;
resource->pid = current->tgid;
/* Keep track of the resource we created. */
resource->private = private;
resource->res_handle = shared_resource->res_handle;
resource->res_base_mem = shared_resource->res_base_mem;
resource->res_size = shared_resource->res_size;
resource->res_cached = shared_resource->res_cached;
resource->res_shared = shared_resource;
mutex_lock(&sm_state->lock);
resource->res_guid = ++sm_state->guid;
mutex_unlock(&sm_state->lock);
resource->res_guid <<= PAGE_SHIFT;
vmcs_sm_add_resource(private, resource);
pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
__func__, resource->res_guid, resource->res_handle,
resource->res_base_mem, resource->res_size,
resource->res_cached);
/* We're done */
resource->res_stats[ALLOC]++;
ioparam->handle = resource->res_guid;
ioparam->size = resource->res_size;
return 0;
error:
pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
if (shared_resource != NULL)
vmcs_sm_release_resource(shared_resource, 0);
return ret;
}
/* Free a previously allocated shared memory handle and block.*/
static int vc_sm_ioctl_free(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_free *ioparam)
{
struct sm_resource_t *resource =
vmcs_sm_acquire_resource(private, ioparam->handle);
if (resource == NULL) {
pr_err("[%s]: resource for guid %u does not exist\n", __func__,
ioparam->handle);
return -EINVAL;
}
/* Check permissions. */
if (resource->pid && (resource->pid != current->tgid)) {
pr_err("[%s]: current tgid %u != %u owner\n",
__func__, current->tgid, resource->pid);
vmcs_sm_release_resource(resource, 0);
return -EPERM;
}
vmcs_sm_release_resource(resource, 0);
vmcs_sm_release_resource(resource, 0);
return 0;
}
/* Resize a previously allocated shared memory handle and block. */
static int vc_sm_ioctl_resize(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_resize *ioparam)
{
int ret = 0;
int status;
struct vc_sm_resize_t resize;
struct sm_resource_t *resource;
/* Locate resource from GUID. */
resource = vmcs_sm_acquire_resource(private, ioparam->handle);
if (!resource) {
pr_err("[%s]: failed resource - guid %x\n",
__func__, ioparam->handle);
ret = -EFAULT;
goto error;
}
/*
 * If the resource is locked, its lock count will be non-zero, in
 * which case we are not allowed to resize it anyway, so reject the
 * attempt here.
*/
if (resource->lock_count != 0) {
pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
__func__, ioparam->handle, resource->lock_count);
ret = -EFAULT;
goto error;
}
/* Check permissions. */
if (resource->pid && (resource->pid != current->tgid)) {
pr_err("[%s]: current tgid %u != %u owner\n", __func__,
current->tgid, resource->pid);
ret = -EPERM;
goto error;
}
if (resource->map_count != 0) {
pr_err("[%s]: cannot resize - guid %x, ref-cnt %d\n",
__func__, ioparam->handle, resource->map_count);
ret = -EFAULT;
goto error;
}
resize.res_handle = resource->res_handle;
resize.res_mem = (uint32_t)resource->res_base_mem;
resize.res_new_size = ioparam->new_size;
pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
__func__, ioparam->handle, resize.res_handle,
(void *)resize.res_mem);
/* Resize the videocore allocated resource. */
status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
&private->int_trans_id);
if (status == -EINTR) {
pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
__func__, private->int_trans_id);
ret = -ERESTARTSYS;
private->restart_sys = -EINTR;
private->int_action = VC_SM_MSG_TYPE_RESIZE;
goto error;
} else if (status) {
pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
ret = -EPERM;
goto error;
}
pr_debug("[%s]: successfully resized data - hdl %x, size %d -> %d\n",
__func__, resize.res_handle, resource->res_size,
resize.res_new_size);
/* Successfully resized, save the information and inform the user. */
ioparam->old_size = resource->res_size;
resource->res_size = resize.res_new_size;
error:
if (resource)
vmcs_sm_release_resource(resource, 0);
return ret;
}
/* Lock a previously allocated shared memory handle and block. */
static int vc_sm_ioctl_lock(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_lock_unlock *ioparam,
int change_cache, enum vmcs_sm_cache_e cache_type,
unsigned int vc_addr)
{
int status;
struct vc_sm_lock_unlock_t lock;
struct vc_sm_lock_result_t result;
struct sm_resource_t *resource;
int ret = 0;
struct sm_mmap *map, *map_tmp;
unsigned long phys_addr;
map = NULL;
/* Locate resource from GUID. */
resource = vmcs_sm_acquire_resource(private, ioparam->handle);
if (resource == NULL) {
ret = -EINVAL;
goto error;
}
/* Check permissions. */
if (resource->pid && (resource->pid != current->tgid)) {
pr_err("[%s]: current tgid %u != %u owner\n", __func__,
current->tgid, resource->pid);
ret = -EPERM;
goto error;
}
lock.res_handle = resource->res_handle;
lock.res_mem = (uint32_t)resource->res_base_mem;
/* Take the lock and get the address to be mapped. */
if (vc_addr == 0) {
pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
__func__, ioparam->handle, lock.res_handle,
(void *)lock.res_mem);
/* Lock the videocore allocated resource. */
status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
&private->int_trans_id);
if (status == -EINTR) {
pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
__func__, private->int_trans_id);
ret = -ERESTARTSYS;
private->restart_sys = -EINTR;
private->int_action = VC_SM_MSG_TYPE_LOCK;
goto error;
} else if (status || !result.res_mem) {
pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
ret = -EPERM;
resource->res_stats[LOCK_FAIL]++;
goto error;
}
pr_debug("[%s]: successfully locked data - hdl %x, base address %p (%p), ref-cnt %d\n",
__func__, lock.res_handle, (void *)result.res_mem,
(void *)lock.res_mem, resource->lock_count);
}
/* Lock assumed taken already, address to be mapped is known. */
else
resource->res_base_mem = (void *)vc_addr;
resource->res_stats[LOCK]++;
resource->lock_count++;
/* Keep track of the new base memory allocation if it has changed. */
if ((vc_addr == 0) &&
((void *)result.res_mem) &&
((void *)result.res_old_mem) &&
(result.res_mem != result.res_old_mem)) {
resource->res_base_mem = (void *)result.res_mem;
/* Kernel allocated resources. */
if (resource->pid == 0) {
if (!list_empty(&resource->map_list)) {
list_for_each_entry_safe(map, map_tmp,
&resource->map_list,
resource_map_list) {
if (map->res_addr) {
iounmap((void *)map->res_addr);
map->res_addr = 0;
vmcs_sm_remove_map(sm_state,
map->resource,
map);
break;
}
}
}
}
}
if (change_cache)
resource->res_cached = cache_type;
if (resource->map_count) {
ioparam->addr =
vmcs_sm_usr_address_from_pid_and_usr_handle(
current->tgid, ioparam->handle);
pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
__func__, resource->map_count, private->pid,
current->tgid, ioparam->handle, ioparam->addr);
} else {
/* Kernel allocated resources. */
if (resource->pid == 0) {
pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
__func__, ioparam->handle, lock.res_handle);
ioparam->addr = 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
pr_err("[%s]: failed allocating tracker\n",
__func__);
ret = -ENOMEM;
goto error;
} else {
phys_addr = (uint32_t)resource->res_base_mem &
0x3FFFFFFF;
phys_addr += mm_vc_mem_phys_addr;
if (resource->res_cached
== VMCS_SM_CACHE_HOST) {
ioparam->addr = (unsigned long)
/* TODO - make cached work */
ioremap_nocache(phys_addr,
resource->res_size);
pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
__func__, ioparam->handle,
lock.res_handle, ioparam->addr);
} else {
ioparam->addr = (unsigned long)
ioremap_nocache(phys_addr,
resource->res_size);
pr_debug("[%s]: mapping kernel - guid %x, hdl %x - non cached mapping %u\n",
__func__, ioparam->handle,
lock.res_handle, ioparam->addr);
}
map->res_pid = 0;
map->res_vc_hdl = resource->res_handle;
map->res_usr_hdl = resource->res_guid;
map->res_addr = ioparam->addr;
map->resource = resource;
map->vma = NULL;
vmcs_sm_add_map(sm_state, resource, map);
}
} else
ioparam->addr = 0;
}
error:
if (resource)
vmcs_sm_release_resource(resource, 0);
return ret;
}
/* Unlock a previously allocated shared memory handle and block.*/
static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_lock_unlock *ioparam,
int flush, int wait_reply, int no_vc_unlock)
{
int status;
struct vc_sm_lock_unlock_t unlock;
struct sm_mmap *map, *map_tmp;
struct sm_resource_t *resource;
int ret = 0;
map = NULL;
/* Locate resource from GUID. */
resource = vmcs_sm_acquire_resource(private, ioparam->handle);
if (resource == NULL) {
ret = -EINVAL;
goto error;
}
/* Check permissions. */
if (resource->pid && (resource->pid != current->tgid)) {
pr_err("[%s]: current tgid %u != %u owner\n",
__func__, current->tgid, resource->pid);
ret = -EPERM;
goto error;
}
unlock.res_handle = resource->res_handle;
unlock.res_mem = (uint32_t)resource->res_base_mem;
pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
__func__, ioparam->handle, unlock.res_handle,
(void *)unlock.res_mem);
/* User space allocated resources. */
if (resource->pid) {
/* Flush if requested */
if (resource->res_cached && flush) {
dma_addr_t phys_addr = 0;
resource->res_stats[FLUSH]++;
phys_addr =
(dma_addr_t)((uint32_t)resource->res_base_mem &
0x3FFFFFFF);
phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
/* L1 cache flush */
down_read(&current->mm->mmap_sem);
list_for_each_entry(map, &resource->map_list,
resource_map_list) {
if (map->vma) {
const unsigned long start = map->vma->vm_start;
const unsigned long end = map->vma->vm_end;
ret = clean_invalid_mem_walk(start, end - start,
VCSM_CACHE_OP_FLUSH);
if (ret)
goto error;
}
}
up_read(&current->mm->mmap_sem);
/* L2 cache flush */
outer_clean_range(phys_addr,
phys_addr +
(size_t) resource->res_size);
}
/* We need to zap all the vmas associated with this resource */
if (resource->lock_count == 1) {
down_read(&current->mm->mmap_sem);
list_for_each_entry(map, &resource->map_list,
resource_map_list) {
if (map->vma) {
zap_vma_ptes(map->vma,
map->vma->vm_start,
map->vma->vm_end -
map->vma->vm_start);
}
}
up_read(&current->mm->mmap_sem);
}
}
/* Kernel allocated resources. */
else {
/* Global + Taken in this context */
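/*
 * A ref_count of exactly 2 means only the global resource list and this
 * caller still hold references, so it is safe to flush (if requested) and
 * tear down the kernel ioremap mapping below.
 */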
if (resource->ref_count == 2) {
if (!list_empty(&resource->map_list)) {
list_for_each_entry_safe(map, map_tmp,
&resource->map_list,
resource_map_list) {
if (map->res_addr) {
if (flush &&
(resource->res_cached ==
VMCS_SM_CACHE_HOST)) {
unsigned long
phys_addr;
phys_addr = (uint32_t)
resource->res_base_mem & 0x3FFFFFFF;
phys_addr +=
mm_vc_mem_phys_addr;
/* L1 cache flush */
dmac_flush_range((const
void
*)
map->res_addr, (const void *)
(map->res_addr + resource->res_size));
/* L2 cache flush */
outer_clean_range
(phys_addr,
phys_addr +
(size_t)
resource->res_size);
}
iounmap((void *)map->res_addr);
map->res_addr = 0;
vmcs_sm_remove_map(sm_state,
map->resource,
map);
break;
}
}
}
}
}
if (resource->lock_count) {
/* Bypass the videocore unlock. */
if (no_vc_unlock)
status = 0;
/* Unlock the videocore allocated resource. */
else {
status =
vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
&private->int_trans_id,
wait_reply);
if (status == -EINTR) {
pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
__func__, private->int_trans_id);
ret = -ERESTARTSYS;
resource->res_stats[UNLOCK]--;
private->restart_sys = -EINTR;
private->int_action = VC_SM_MSG_TYPE_UNLOCK;
goto error;
} else if (status != 0) {
pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
ret = -EPERM;
resource->res_stats[UNLOCK_FAIL]++;
goto error;
}
}
resource->res_stats[UNLOCK]++;
resource->lock_count--;
}
pr_debug("[%s]: successfully unlocked data - hdl %x, base address %p, ref-cnt %d\n",
__func__, unlock.res_handle, (void *)unlock.res_mem,
resource->lock_count);
error:
if (resource)
vmcs_sm_release_resource(resource, 0);
return ret;
}
/* Import a contiguous block of memory to be shared with VC. */
static int vc_sm_ioctl_import_dmabuf(struct sm_priv_data_t *private,
struct vmcs_sm_ioctl_import_dmabuf *ioparam,
struct dma_buf *src_dma_buf)
{
int ret = 0;
int status;
struct sm_resource_t *resource = NULL;
struct vc_sm_import import = { 0 };
struct vc_sm_import_result result = { 0 };
struct dma_buf *dma_buf;
struct dma_buf_attachment *attach = NULL;
struct sg_table *sgt = NULL;
/* Setup our allocation parameters */
if (src_dma_buf) {
get_dma_buf(src_dma_buf);
dma_buf = src_dma_buf;
} else {
dma_buf = dma_buf_get(ioparam->dmabuf_fd);
}
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
if (IS_ERR(attach)) {
ret = PTR_ERR(attach);
goto error;
}
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto error;
}
/* Verify that the address block is contiguous */
if (sgt->nents != 1) {
ret = -ENOMEM;
goto error;
}
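/*
 * With a single scatterlist entry, sg_dma_address()/sg_dma_len() below give
 * the one bus address and length that VideoCore needs for the import; a
 * multi-segment dma-buf cannot be described this way, hence the check above.
 */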
import.type = ((ioparam->cached == VMCS_SM_CACHE_VC) ||
(ioparam->cached == VMCS_SM_CACHE_BOTH)) ?
VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
import.addr = (uint32_t)sg_dma_address(sgt->sgl);
import.size = sg_dma_len(sgt->sgl);
import.allocator = current->tgid;
if (*ioparam->name)
memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
else
memcpy(import.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
__func__, import.name, import.type,
(void *)import.addr, import.size);
/* Allocate local resource to track this allocation. */
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource) {
ret = -ENOMEM;
goto error;
}
INIT_LIST_HEAD(&resource->map_list);
resource->ref_count++;
resource->pid = current->tgid;
/* Allocate the videocore resource. */
status = vc_vchi_sm_import(sm_state->sm_handle, &import, &result,
&private->int_trans_id);
if (status == -EINTR) {
pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
__func__, private->int_trans_id);
ret = -ERESTARTSYS;
private->restart_sys = -EINTR;
private->int_action = VC_SM_MSG_TYPE_IMPORT;
goto error;
} else if (status || !result.res_handle) {
pr_debug("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
__func__, status, private->int_trans_id);
ret = -ENOMEM;
resource->res_stats[ALLOC_FAIL]++;
goto error;
}
/* Keep track of the resource we created. */
resource->private = private;
resource->res_handle = result.res_handle;
resource->res_size = import.size;
resource->res_cached = ioparam->cached;
resource->dma_buf = dma_buf;
resource->attach = attach;
resource->sgt = sgt;
resource->dma_addr = sg_dma_address(sgt->sgl);
/*
* Kernel/user GUID. This global identifier is used for mmap'ing the
* allocated region from user space, it is passed as the mmap'ing
* offset, we use it to 'hide' the videocore handle/address.
*/
mutex_lock(&sm_state->lock);
resource->res_guid = ++sm_state->guid;
mutex_unlock(&sm_state->lock);
resource->res_guid <<= PAGE_SHIFT;
vmcs_sm_add_resource(private, resource);
/* We're done */
resource->res_stats[IMPORT]++;
ioparam->handle = resource->res_guid;
return 0;
error:
if (resource) {
resource->res_stats[IMPORT_FAIL]++;
vc_sm_resource_deceased(resource, 1);
kfree(resource);
}
if (sgt)
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
if (attach)
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ret;
}
/* Handle control from host. */
static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret = 0;
unsigned int cmdnr = _IOC_NR(cmd);
struct sm_priv_data_t *file_data =
(struct sm_priv_data_t *)file->private_data;
struct sm_resource_t *resource = NULL;
/* Validate we can work with this device. */
if ((sm_state == NULL) || (file_data == NULL)) {
pr_err("[%s]: invalid device\n", __func__);
ret = -EPERM;
goto out;
}
pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
current->tgid, file_data->pid);
/* Is this action a re-post of a previously interrupted action? */
if (file_data->restart_sys == -EINTR) {
struct vc_sm_action_clean_t action_clean;
pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
__func__, file_data->int_action,
file_data->int_trans_id);
action_clean.res_action = file_data->int_action;
action_clean.action_trans_id = file_data->int_trans_id;
vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
file_data->restart_sys = 0;
}
/* Now process the command. */
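/*
 * Every command below follows the same shape: copy the ioctl parameters in,
 * perform the action, then copy any results back. For the allocating
 * commands (ALLOC, ALLOC_SHARE, IMPORT_DMABUF) a failed copy-to-user also
 * frees the freshly created handle so it is not leaked to a caller that
 * never learned its value.
 */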
switch (cmdnr) {
/* New memory allocation.
*/
case VMCS_SM_CMD_ALLOC:
{
struct vmcs_sm_ioctl_alloc ioparam;
/* Get the parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_alloc(file_data, &ioparam);
if (!ret &&
(copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0)) {
struct vmcs_sm_ioctl_free freeparam = {
ioparam.handle
};
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
vc_sm_ioctl_free(file_data, &freeparam);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Share existing memory allocation. */
case VMCS_SM_CMD_ALLOC_SHARE:
{
struct vmcs_sm_ioctl_alloc_share ioparam;
/* Get the parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
/* Copy result back to user. */
if (!ret
&& copy_to_user((void *)arg, &ioparam,
sizeof(ioparam)) != 0) {
struct vmcs_sm_ioctl_free freeparam = {
ioparam.handle
};
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
vc_sm_ioctl_free(file_data, &freeparam);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
case VMCS_SM_CMD_IMPORT_DMABUF:
{
struct vmcs_sm_ioctl_import_dmabuf ioparam;
/* Get the parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_import_dmabuf(file_data, &ioparam,
NULL);
if (!ret &&
(copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0)) {
struct vmcs_sm_ioctl_free freeparam = {
ioparam.handle
};
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
vc_sm_ioctl_free(file_data, &freeparam);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Lock (attempt to) *and* register a cache behavior change. */
case VMCS_SM_CMD_LOCK_CACHE:
{
struct vmcs_sm_ioctl_lock_cache ioparam;
struct vmcs_sm_ioctl_lock_unlock lock;
/* Get parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
lock.handle = ioparam.handle;
ret =
vc_sm_ioctl_lock(file_data, &lock, 1,
ioparam.cached, 0);
/* Done. */
goto out;
}
break;
/* Lock (attempt to) existing memory allocation. */
case VMCS_SM_CMD_LOCK:
{
struct vmcs_sm_ioctl_lock_unlock ioparam;
/* Get parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
/* Copy result back to user. */
if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
!= 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Unlock (attempt to) existing memory allocation. */
case VMCS_SM_CMD_UNLOCK:
{
struct vmcs_sm_ioctl_lock_unlock ioparam;
/* Get parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
/* Done. */
goto out;
}
break;
/* Resize (attempt to) existing memory allocation. */
case VMCS_SM_CMD_RESIZE:
{
struct vmcs_sm_ioctl_resize ioparam;
/* Get parameter data. */
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_resize(file_data, &ioparam);
/* Copy result back to user. */
if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
!= 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
goto out;
}
break;
/* Terminate existing memory allocation.
*/
case VMCS_SM_CMD_FREE:
{
struct vmcs_sm_ioctl_free ioparam;
/* Get parameter data.
*/
if (copy_from_user
(&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ret = vc_sm_ioctl_free(file_data, &ioparam);
/* Done.
*/
goto out;
}
break;
/* Walk allocation on videocore, information shows up in the
** videocore log.
*/
case VMCS_SM_CMD_VC_WALK_ALLOC:
{
pr_debug("[%s]: invoking walk alloc\n", __func__);
if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
pr_err("[%s]: failed to walk-alloc on videocore\n",
__func__);
/* Done.
*/
goto out;
}
break;
/* Walk mapping table on host, information shows up in the
** kernel log.
*/
case VMCS_SM_CMD_HOST_WALK_MAP:
{
/* Use pid of -1 to tell to walk the whole map. */
vmcs_sm_host_walk_map_per_pid(-1);
/* Done. */
goto out;
}
break;
/* Walk mapping table per process on host. */
case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
{
struct vmcs_sm_ioctl_walk ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
vmcs_sm_host_walk_alloc(file_data);
/* Done. */
goto out;
}
break;
/* Walk allocation per process on host. */
case VMCS_SM_CMD_HOST_WALK_PID_MAP:
{
struct vmcs_sm_ioctl_walk ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
vmcs_sm_host_walk_map_per_pid(ioparam.pid);
/* Done. */
goto out;
}
break;
/* Gets the size of the memory associated with a user handle. */
case VMCS_SM_CMD_SIZE_USR_HANDLE:
{
struct vmcs_sm_ioctl_size ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource != NULL) {
ioparam.size = resource->res_size;
vmcs_sm_release_resource(resource, 0);
} else {
ioparam.size = 0;
}
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Verify we are dealing with a valid resource. */
case VMCS_SM_CMD_CHK_USR_HANDLE:
{
struct vmcs_sm_ioctl_chk ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource == NULL)
ret = -EINVAL;
/*
* If the resource is cacheable, return additional
* information that may be needed to flush the cache.
*/
else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
(resource->res_cached == VMCS_SM_CACHE_BOTH)) {
ioparam.addr =
vmcs_sm_usr_address_from_pid_and_usr_handle
(current->tgid, ioparam.handle);
ioparam.size = resource->res_size;
ioparam.cache = resource->res_cached;
} else {
ioparam.addr = 0;
ioparam.size = 0;
ioparam.cache = resource->res_cached;
}
if (resource)
vmcs_sm_release_resource(resource, 0);
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/*
* Maps a user handle given the process and the virtual address.
*/
case VMCS_SM_CMD_MAPPED_USR_HANDLE:
{
struct vmcs_sm_ioctl_map ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ioparam.handle =
vmcs_sm_usr_handle_from_pid_and_address(
ioparam.pid, ioparam.addr);
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if ((resource != NULL)
&& ((resource->res_cached == VMCS_SM_CACHE_HOST)
|| (resource->res_cached ==
VMCS_SM_CACHE_BOTH))) {
ioparam.size = resource->res_size;
} else {
ioparam.size = 0;
}
if (resource)
vmcs_sm_release_resource(resource, 0);
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/*
* Maps a videocore handle given process and virtual address.
*/
case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
{
struct vmcs_sm_ioctl_map ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
ioparam.pid, ioparam.addr);
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Maps a videocore handle given process and user handle. */
case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
{
struct vmcs_sm_ioctl_map ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource != NULL) {
ioparam.handle = resource->res_handle;
vmcs_sm_release_resource(resource, 0);
} else {
ioparam.handle = 0;
}
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/*
* Maps a videocore address given process and videocore handle.
*/
case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
{
struct vmcs_sm_ioctl_map ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource != NULL) {
ioparam.addr =
(unsigned int)resource->res_base_mem;
vmcs_sm_release_resource(resource, 0);
} else {
ioparam.addr = 0;
}
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Maps a user address given process and vc handle. */
case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
{
struct vmcs_sm_ioctl_map ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/*
* Return the address information from the mapping,
* 0 (ie NULL) if it cannot locate the actual mapping.
*/
ioparam.addr =
vmcs_sm_usr_address_from_pid_and_usr_handle
(ioparam.pid, ioparam.handle);
if (copy_to_user((void *)arg,
&ioparam, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-to-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
}
/* Done. */
goto out;
}
break;
/* Flush the cache for a given mapping. */
case VMCS_SM_CMD_FLUSH:
{
struct vmcs_sm_ioctl_cache ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource == NULL) {
ret = -EINVAL;
goto out;
}
ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
resource);
vmcs_sm_release_resource(resource, 0);
if (ret)
goto out;
}
break;
/* Invalidate the cache for a given mapping. */
case VMCS_SM_CMD_INVALID:
{
struct vmcs_sm_ioctl_cache ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource == NULL) {
ret = -EINVAL;
goto out;
}
ret = clean_invalid_resource_walk((void __user*) ioparam.addr,
ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
vmcs_sm_release_resource(resource, 0);
if (ret)
goto out;
}
break;
/* Flush/Invalidate the cache for a given mapping. */
case VMCS_SM_CMD_CLEAN_INVALID:
{
int i;
struct vmcs_sm_ioctl_clean_invalid ioparam;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) {
if (ioparam.s[i].cmd == VCSM_CACHE_OP_NOP)
break;
/* Locate resource from GUID. */
resource =
vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
if (resource == NULL) {
ret = -EINVAL;
goto out;
}
ret = clean_invalid_resource_walk(
(void __user*) ioparam.s[i].addr, ioparam.s[i].size,
ioparam.s[i].cmd, ioparam.s[i].handle, resource);
vmcs_sm_release_resource(resource, 0);
if (ret)
goto out;
}
}
break;
/*
* Flush/Invalidate the cache for a given mapping.
* Blocks must be pinned (i.e. accessed) before this call.
*/
case VMCS_SM_CMD_CLEAN_INVALID2:
{
int i;
struct vmcs_sm_ioctl_clean_invalid2 ioparam;
struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
/* Get parameter data. */
if (copy_from_user(&ioparam,
(void *)arg, sizeof(ioparam)) != 0) {
pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
block = kmalloc(ioparam.op_count *
sizeof(struct vmcs_sm_ioctl_clean_invalid_block),
GFP_KERNEL);
if (!block) {
ret = -EFAULT;
goto out;
}
if (copy_from_user(block,
(void *)(arg + sizeof(ioparam)), ioparam.op_count * sizeof(struct vmcs_sm_ioctl_clean_invalid_block)) != 0) {
pr_err("[%s]: failed to copy-from-user payload for cmd %x\n",
__func__, cmdnr);
ret = -EFAULT;
goto out;
}
for (i = 0; i < ioparam.op_count; i++) {
const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
if (op->invalidate_mode == VCSM_CACHE_OP_NOP)
continue;
ret = clean_invalid_contiguous_mem_2d(
(void __user*) op->start_address, op->block_count,
op->block_size, op->inter_block_stride,
op->invalidate_mode);
if (ret)
break;
}
kfree(block);
}
break;
default:
{
ret = -EINVAL;
goto out;
}
break;
}
out:
return ret;
}
/* Device operations that we managed in this driver. */
static const struct file_operations vmcs_sm_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vc_sm_ioctl,
.open = vc_sm_open,
.release = vc_sm_release,
.mmap = vc_sm_mmap,
};
/* Creation of device. */
static int vc_sm_create_sharedmemory(void)
{
int ret;
if (sm_state == NULL) {
ret = -ENOMEM;
goto out;
}
/* Create a device class for creating dev nodes. */
sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
if (IS_ERR(sm_state->sm_class)) {
pr_err("[%s]: unable to create device class\n", __func__);
ret = PTR_ERR(sm_state->sm_class);
goto out;
}
/* Create a character driver. */
ret = alloc_chrdev_region(&sm_state->sm_devid,
DEVICE_MINOR, 1, DEVICE_NAME);
if (ret != 0) {
pr_err("[%s]: unable to allocate device number\n", __func__);
goto out_dev_class_destroy;
}
cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
if (ret != 0) {
pr_err("[%s]: unable to register device\n", __func__);
goto out_chrdev_unreg;
}
/* Create a device node. */
sm_state->sm_dev = device_create(sm_state->sm_class,
NULL,
MKDEV(MAJOR(sm_state->sm_devid),
DEVICE_MINOR), NULL,
DEVICE_NAME);
if (IS_ERR(sm_state->sm_dev)) {
pr_err("[%s]: unable to create device node\n", __func__);
ret = PTR_ERR(sm_state->sm_dev);
goto out_chrdev_del;
}
goto out;
out_chrdev_del:
cdev_del(&sm_state->sm_cdev);
out_chrdev_unreg:
unregister_chrdev_region(sm_state->sm_devid, 1);
out_dev_class_destroy:
class_destroy(sm_state->sm_class);
sm_state->sm_class = NULL;
out:
return ret;
}
/* Termination of the device. */
static int vc_sm_remove_sharedmemory(void)
{
int ret;
if (sm_state == NULL) {
/* Nothing to do. */
ret = 0;
goto out;
}
/* Remove the sharedmemory character driver. */
cdev_del(&sm_state->sm_cdev);
/* Unregister region. */
unregister_chrdev_region(sm_state->sm_devid, 1);
ret = 0;
goto out;
out:
return ret;
}
/* Videocore connected. */
static void vc_sm_connected_init(void)
{
int ret;
VCHI_INSTANCE_T vchi_instance;
VCHI_CONNECTION_T *vchi_connection = NULL;
pr_info("[%s]: start\n", __func__);
/*
* Initialize and create a VCHI connection for the shared memory service
* running on videocore.
*/
ret = vchi_initialise(&vchi_instance);
if (ret != 0) {
pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
__func__, ret);
ret = -EIO;
goto err_free_mem;
}
ret = vchi_connect(NULL, 0, vchi_instance);
if (ret != 0) {
pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
__func__, ret);
ret = -EIO;
goto err_free_mem;
}
/* Initialize an instance of the shared memory service. */
sm_state->sm_handle =
vc_vchi_sm_init(vchi_instance, &vchi_connection, 1);
if (sm_state->sm_handle == NULL) {
pr_err("[%s]: failed to initialize shared memory service\n",
__func__);
ret = -EPERM;
goto err_free_mem;
}
/* Create a debug fs directory entry (root). */
sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
if (!sm_state->dir_root) {
pr_err("[%s]: failed to create \'%s\' directory entry\n",
__func__, VC_SM_DIR_ROOT_NAME);
ret = -EPERM;
goto err_stop_sm_service;
}
sm_state->dir_state.show = &vc_sm_global_state_show;
sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
0444, sm_state->dir_root, &sm_state->dir_state,
&vc_sm_debug_fs_fops);
sm_state->dir_stats.show = &vc_sm_global_statistics_show;
sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
0444, sm_state->dir_root, &sm_state->dir_stats,
&vc_sm_debug_fs_fops);
/* Create the proc entry children. */
sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
sm_state->dir_root);
/* Create a shared memory device. */
ret = vc_sm_create_sharedmemory();
if (ret != 0) {
pr_err("[%s]: failed to create shared memory device\n",
__func__);
goto err_remove_debugfs;
}
INIT_LIST_HEAD(&sm_state->map_list);
INIT_LIST_HEAD(&sm_state->resource_list);
sm_state->data_knl = vc_sm_create_priv_data(0);
if (sm_state->data_knl == NULL) {
pr_err("[%s]: failed to create kernel private data tracker\n",
__func__);
goto err_remove_shared_memory;
}
/* Done! */
sm_inited = 1;
goto out;
err_remove_shared_memory:
vc_sm_remove_sharedmemory();
err_remove_debugfs:
debugfs_remove_recursive(sm_state->dir_root);
err_stop_sm_service:
vc_vchi_sm_stop(&sm_state->sm_handle);
err_free_mem:
kfree(sm_state);
out:
pr_info("[%s]: end - returning %d\n", __func__, ret);
}
/* Driver loading. */
static int bcm2835_vcsm_probe(struct platform_device *pdev)
{
pr_info("vc-sm: Videocore shared memory driver\n");
sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
if (!sm_state)
return -ENOMEM;
sm_state->pdev = pdev;
mutex_init(&sm_state->lock);
mutex_init(&sm_state->map_lock);
vchiq_add_connected_callback(vc_sm_connected_init);
return 0;
}
/* Driver unloading. */
static int bcm2835_vcsm_remove(struct platform_device *pdev)
{
pr_debug("[%s]: start\n", __func__);
if (sm_inited) {
/* Remove shared memory device. */
vc_sm_remove_sharedmemory();
/* Remove all proc entries. */
debugfs_remove_recursive(sm_state->dir_root);
/* Stop the videocore shared memory service. */
vc_vchi_sm_stop(&sm_state->sm_handle);
/* Free the memory for the state structure. */
mutex_destroy(&(sm_state->map_lock));
kfree(sm_state);
}
pr_debug("[%s]: end\n", __func__);
return 0;
}
#if defined(__KERNEL__)
/* Allocate a shared memory handle and block. */
int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle)
{
struct vmcs_sm_ioctl_alloc ioparam = { 0 };
int ret;
struct sm_resource_t *resource;
/* Validate we can work with this device. */
if (sm_state == NULL || alloc == NULL || handle == NULL) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
ioparam.size = alloc->base_unit;
ioparam.num = alloc->num_unit;
ioparam.cached =
alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
if (ret == 0) {
resource =
vmcs_sm_acquire_resource(sm_state->data_knl,
ioparam.handle);
if (resource) {
resource->pid = 0;
vmcs_sm_release_resource(resource, 0);
/* Assign valid handle at this time. */
*handle = ioparam.handle;
} else {
ret = -ENOMEM;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_alloc);
/* Get an internal resource handle mapped from the external one. */
int vc_sm_int_handle(int handle)
{
struct sm_resource_t *resource;
int ret = 0;
/* Validate we can work with this device. */
if (sm_state == NULL || handle == 0) {
pr_err("[%s]: invalid input\n", __func__);
return 0;
}
/* Locate resource from GUID. */
resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
if (resource) {
ret = resource->res_handle;
vmcs_sm_release_resource(resource, 0);
}
return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_int_handle);
/* Free a previously allocated shared memory handle and block. */
int vc_sm_free(int handle)
{
struct vmcs_sm_ioctl_free ioparam = { handle };
/* Validate we can work with this device. */
if (sm_state == NULL || handle == 0) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
}
EXPORT_SYMBOL_GPL(vc_sm_free);
/* Lock a memory handle for use by kernel. */
int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
unsigned long *data)
{
struct vmcs_sm_ioctl_lock_unlock ioparam;
int ret;
/* Validate we can work with this device. */
if (sm_state == NULL || handle == 0 || data == NULL) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
*data = 0;
ioparam.handle = handle;
ret = vc_sm_ioctl_lock(sm_state->data_knl,
&ioparam,
1,
((mode ==
VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
VMCS_SM_CACHE_NONE), 0);
*data = ioparam.addr;
return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_lock);
/* Unlock a memory handle in use by kernel. */
int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
{
struct vmcs_sm_ioctl_lock_unlock ioparam;
/* Validate we can work with this device. */
if (sm_state == NULL || handle == 0) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
ioparam.handle = handle;
return vc_sm_ioctl_unlock(sm_state->data_knl,
&ioparam, flush, 0, no_vc_unlock);
}
EXPORT_SYMBOL_GPL(vc_sm_unlock);
/* Map a shared memory region for use by kernel. */
int vc_sm_map(int handle, unsigned int sm_addr,
enum vc_sm_lock_cache_mode mode, unsigned long *data)
{
struct vmcs_sm_ioctl_lock_unlock ioparam;
int ret;
/* Validate we can work with this device. */
if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
*data = 0;
ioparam.handle = handle;
ret = vc_sm_ioctl_lock(sm_state->data_knl,
&ioparam,
1,
((mode ==
VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
VMCS_SM_CACHE_NONE), sm_addr);
*data = ioparam.addr;
return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_map);
/* Import a dmabuf to be shared with VC. */
int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle)
{
struct vmcs_sm_ioctl_import_dmabuf ioparam = { 0 };
int ret;
struct sm_resource_t *resource;
/* Validate we can work with this device. */
if (!sm_state || !dmabuf || !handle) {
pr_err("[%s]: invalid input\n", __func__);
return -EPERM;
}
ioparam.cached = 0;
strcpy(ioparam.name, "KRNL DMABUF");
ret = vc_sm_ioctl_import_dmabuf(sm_state->data_knl, &ioparam, dmabuf);
if (!ret) {
resource = vmcs_sm_acquire_resource(sm_state->data_knl,
ioparam.handle);
if (resource) {
resource->pid = 0;
vmcs_sm_release_resource(resource, 0);
/* Assign valid handle at this time.*/
*handle = ioparam.handle;
} else {
ret = -ENOMEM;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(vc_sm_import_dmabuf);
#endif
/*
* Register the driver with device tree
*/
static const struct of_device_id bcm2835_vcsm_of_match[] = {
{.compatible = "raspberrypi,bcm2835-vcsm",},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm2835_vcsm_of_match);
static struct platform_driver bcm2835_vcsm_driver = {
.probe = bcm2835_vcsm_probe,
.remove = bcm2835_vcsm_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = bcm2835_vcsm_of_match,
},
};
module_platform_driver(bcm2835_vcsm_driver);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
rogerpueyo/openwrt-packages | libs/libdmapsharing/Makefile | 1714 | #
# Copyright (C) 2009-2012 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
# This Makefile is a skeleton
#
include $(TOPDIR)/rules.mk
PKG_NAME:=libdmapsharing
PKG_VERSION:=3.9.4
PKG_RELEASE:=1
PKG_MAINTAINER:=W. Michael Petullo <[email protected]>
PKG_LICENSE:=LGPLv2.1
PKG_LICENSE_FILES:=COPYING
PKG_SOURCE:=libdmapsharing-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.flyn.org/projects/libdmapsharing/
PKG_HASH:=fbb8eb272a3d659f534050cce190a72a02599f892f517de99a8a71984dd16ee2
PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
TARGET_LDFLAGS+= \
-Wl,-rpath-link=$(STAGING_DIR)/usr/lib
define Package/libdmapsharing
SECTION:=libs
CATEGORY:=Libraries
DEPENDS:=+libsoup +mdnsresponder +gstreamer1-libs +gstreamer1-plugins-base +gst1-mod-app
TITLE:=libdmapsharing
URL:=https://www.flyn.org/projects/libdmapsharing/
endef
define Package/libdmapsharing/description
Libdmapsharing is a DMAP library implementation in C
endef
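# The configure switches below trim optional features (unit tests, gtk-doc
# documentation, GObject introspection) that would otherwise pull in extra
# host/target build dependencies; assumption: none of them are needed on an
# OpenWrt target.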
CONFIGURE_ARGS += \
--disable-check \
--disable-gtk-doc \
--disable-introspection
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/
$(CP) \
$(PKG_INSTALL_DIR)/usr/include/libdmapsharing-4.0/ \
$(1)/usr/include/
$(INSTALL_DIR) $(1)/usr/lib/
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/*.so* \
$(1)/usr/lib/
$(INSTALL_DIR) $(1)/usr/lib/pkgconfig/
$(INSTALL_DATA) \
$(PKG_INSTALL_DIR)/usr/lib/pkgconfig/*.pc \
$(1)/usr/lib/pkgconfig/
endef
define Package/libdmapsharing/install
$(INSTALL_DIR) $(1)/usr/lib/
$(CP) \
$(PKG_INSTALL_DIR)/usr/lib/*.so* \
$(1)/usr/lib/
endef
$(eval $(call BuildPackage,libdmapsharing))
| gpl-2.0 |
MarcPouliquenInria/visp | modules/imgproc/test/testFloodFill.cpp | 14454 | /****************************************************************************
*
* This file is part of the ViSP software.
* Copyright (C) 2005 - 2017 by Inria. All rights reserved.
*
* This software is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* See the file LICENSE.txt at the root directory of this source
* distribution for additional information about the GNU GPL.
*
* For using ViSP with software that can not be combined with the GNU
* GPL, please contact Inria about acquiring a ViSP Professional
* Edition License.
*
* See http://visp.inria.fr for more information.
*
* This software was developed at:
* Inria Rennes - Bretagne Atlantique
* Campus Universitaire de Beaulieu
* 35042 Rennes Cedex
* France
*
* If you have questions regarding the use of this file, please contact
* Inria at [email protected]
*
* This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
* WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Description:
* Test flood fill algorithm.
*
* Authors:
* Souriya Trinh
*
*****************************************************************************/
#include <iomanip>
#include <visp3/core/vpImageTools.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/imgproc/vpImgproc.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
/*!
\example testFloodFill.cpp
\brief Test flood fill algorithm.
*/
// List of allowed command line options
#define GETOPTARGS "cdi:o:h"
void usage(const char *name, const char *badparam, std::string ipath, std::string opath, std::string user);
bool getOptions(int argc, const char **argv, std::string &ipath, std::string &opath, std::string user);
/*
Print the program options.
\param name : Program name.
\param badparam : Bad parameter name.
\param ipath: Input image path.
\param opath : Output image path.
\param user : Username.
*/
void usage(const char *name, const char *badparam, std::string ipath, std::string opath, std::string user)
{
fprintf(stdout, "\n\
Test flood fill algorithm.\n\
\n\
SYNOPSIS\n\
%s [-i <input image path>] [-o <output image path>]\n\
[-h]\n \
", name);
fprintf(stdout, "\n\
OPTIONS: Default\n\
-i <input image path> %s\n\
Set image input path.\n\
From this path read \"Klimt/Klimt.pgm\"\n\
image.\n\
Setting the VISP_INPUT_IMAGE_PATH environment\n\
variable produces the same behaviour than using\n\
this option.\n\
\n\
-o <output image path> %s\n\
Set image output path.\n\
From this directory, creates the \"%s\"\n\
subdirectory depending on the username, where \n\
output result images are written.\n\
\n\
-h\n\
Print the help.\n\n", ipath.c_str(), opath.c_str(), user.c_str());
if (badparam)
fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
/*!
Set the program options.
\param argc : Command line number of parameters.
\param argv : Array of command line parameters.
\param ipath: Input image path.
\param opath : Output image path.
\param user : Username.
\return false if the program has to be stopped, true otherwise.
*/
bool getOptions(int argc, const char **argv, std::string &ipath, std::string &opath, std::string user)
{
const char *optarg_;
int c;
while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
switch (c) {
case 'i':
ipath = optarg_;
break;
case 'o':
opath = optarg_;
break;
case 'h':
usage(argv[0], NULL, ipath, opath, user);
return false;
break;
case 'c':
case 'd':
break;
default:
usage(argv[0], optarg_, ipath, opath, user);
return false;
break;
}
}
if ((c == 1) || (c == -1)) {
// standalone param or error
usage(argv[0], NULL, ipath, opath, user);
std::cerr << "ERROR: " << std::endl;
std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
return false;
}
return true;
}
void printImage(const vpImage<unsigned char> &I, const std::string &name)
{
std::cout << "\n" << name << ":" << std::endl;
std::cout << " ";
for (unsigned int j = 0; j < I.getWidth(); j++) {
std::cout << std::setfill(' ') << std::setw(2) << j << " ";
}
std::cout << std::endl;
std::cout << std::setfill(' ') << std::setw(3) << "+";
for (unsigned int j = 0; j < I.getWidth(); j++) {
std::cout << std::setw(3) << "---";
}
std::cout << std::endl;
for (unsigned int i = 0; i < I.getHeight(); i++) {
std::cout << std::setfill(' ') << std::setw(2) << i << "|";
for (unsigned int j = 0; j < I.getWidth(); j++) {
std::cout << std::setfill(' ') << std::setw(2) << static_cast<unsigned int>(I[i][j]) << " ";
}
std::cout << std::endl;
}
}
int main(int argc, const char **argv)
{
try {
std::string env_ipath;
std::string opt_ipath;
std::string opt_opath;
std::string ipath;
std::string opath;
std::string filename;
std::string username;
// Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
// environment variable value
env_ipath = vpIoTools::getViSPImagesDataPath();
// Set the default input path
if (!env_ipath.empty())
ipath = env_ipath;
// Set the default output path
#if defined(_WIN32)
opt_opath = "C:/temp";
#else
opt_opath = "/tmp";
#endif
// Get the user login name
vpIoTools::getUserName(username);
// Read the command line options
if (getOptions(argc, argv, opt_ipath, opt_opath, username) == false) {
exit(EXIT_FAILURE);
}
// Get the option values
if (!opt_ipath.empty())
ipath = opt_ipath;
if (!opt_opath.empty())
opath = opt_opath;
// Append to the output path string, the login name of the user
opath = vpIoTools::createFilePath(opath, username);
    // Test if the output path exists. If not, try to create it
if (vpIoTools::checkDirectory(opath) == false) {
try {
// Create the dirname
vpIoTools::makeDirectory(opath);
} catch (...) {
usage(argv[0], NULL, ipath, opt_opath, username);
std::cerr << std::endl << "ERROR:" << std::endl;
std::cerr << " Cannot create " << opath << std::endl;
std::cerr << " Check your -o " << opt_opath << " option " << std::endl;
exit(EXIT_FAILURE);
}
}
    // Compare ipath and env_ipath. If they differ, we take into account
    // the input path coming from the command line option
if (!opt_ipath.empty() && !env_ipath.empty()) {
if (ipath != env_ipath) {
std::cout << std::endl << "WARNING: " << std::endl;
std::cout << " Since -i <visp image path=" << ipath << "> "
<< " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl
<< " we skip the environment variable." << std::endl;
}
}
// Test if an input path is set
if (opt_ipath.empty() && env_ipath.empty()) {
usage(argv[0], NULL, ipath, opt_opath, username);
std::cerr << std::endl << "ERROR:" << std::endl;
std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
<< " environment variable to specify the location of the " << std::endl
<< " image path where test images are located." << std::endl
<< std::endl;
exit(EXIT_FAILURE);
}
//
    // Here the test really starts
//
unsigned char image_data[8 * 8] = {1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1,
1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0};
vpImage<unsigned char> I_test_flood_fill_4_connexity(image_data, 8, 8, true);
vpImage<unsigned char> I_test_flood_fill_8_connexity = I_test_flood_fill_4_connexity;
printImage(I_test_flood_fill_4_connexity, "Test image data");
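    // Reference results: with 4-connexity the fill only crosses horizontal and
    // vertical neighbours, whereas 8-connexity also crosses diagonal
    // neighbours, hence the two expected images below differ.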
unsigned char image_data_check_4_connexity[8 * 8] = {
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0};
vpImage<unsigned char> I_check_4_connexity(image_data_check_4_connexity, 8, 8, true);
unsigned char image_data_check_8_connexity[8 * 8] = {
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0};
vpImage<unsigned char> I_check_8_connexity(image_data_check_8_connexity, 8, 8, true);
// Test flood fill on test data 4-connexity
vp::floodFill(I_test_flood_fill_4_connexity, vpImagePoint(2, 2), 0, 1, vpImageMorphology::CONNEXITY_4);
printImage(I_test_flood_fill_4_connexity, "I_test_flood_fill_4_connexity");
if (I_test_flood_fill_4_connexity != I_check_4_connexity) {
throw vpException(vpException::fatalError, "Problem with vp::floodFill() and 4-connexity!");
}
std::cout << "\n(I_test_flood_fill_4_connexity == I_check_4_connexity)? "
<< (I_test_flood_fill_4_connexity == I_check_4_connexity) << std::endl;
// Test flood fill on test data 8-connexity
vp::floodFill(I_test_flood_fill_8_connexity, vpImagePoint(2, 2), 0, 1, vpImageMorphology::CONNEXITY_8);
printImage(I_test_flood_fill_8_connexity, "I_test_flood_fill_8_connexity");
if (I_test_flood_fill_8_connexity != I_check_8_connexity) {
throw vpException(vpException::fatalError, "Problem with vp::floodFill() and 8-connexity!");
}
std::cout << "\n(I_test_flood_fill_8_connexity == I_check_8_connexity)? "
<< (I_test_flood_fill_8_connexity == I_check_8_connexity) << std::endl;
    // Read Klimt.pgm
filename = vpIoTools::createFilePath(ipath, "Klimt/Klimt.pgm");
vpImage<unsigned char> I_klimt;
vpImageIo::read(I_klimt, filename);
std::cout << "\nRead image: " << filename << " (" << I_klimt.getWidth() << "x" << I_klimt.getHeight() << ")"
<< std::endl
<< std::endl;
vpImageTools::binarise(I_klimt, (unsigned char)127, (unsigned char)255, (unsigned char)0, (unsigned char)255,
(unsigned char)255);
int seed_x = 0;
int seed_y = 0;
vpImage<unsigned char> I_klimt_flood_fill_4_connexity = I_klimt;
double t = vpTime::measureTimeMs();
vp::floodFill(I_klimt_flood_fill_4_connexity, vpImagePoint(seed_y, seed_x), 0, 255, vpImageMorphology::CONNEXITY_4);
t = vpTime::measureTimeMs() - t;
std::cout << "Flood fill on Klimt image (4-connexity): " << t << " ms" << std::endl;
filename = vpIoTools::createFilePath(opath, "Klimt_flood_fill_4_connexity.pgm");
vpImageIo::write(I_klimt_flood_fill_4_connexity, filename);
vpImage<unsigned char> I_klimt_flood_fill_8_connexity = I_klimt;
t = vpTime::measureTimeMs();
vp::floodFill(I_klimt_flood_fill_8_connexity, vpImagePoint(seed_y, seed_x), 0, 255, vpImageMorphology::CONNEXITY_8);
t = vpTime::measureTimeMs() - t;
std::cout << "Flood fill on Klimt image (8-connexity): " << t << " ms" << std::endl;
filename = vpIoTools::createFilePath(opath, "Klimt_flood_fill_8_connexity.pgm");
vpImageIo::write(I_klimt_flood_fill_8_connexity, filename);
#if VISP_HAVE_OPENCV_VERSION >= 0x020408
cv::Mat matImg_klimt_4_connexity, matImg_klimt_8_connexity;
vpImageConvert::convert(I_klimt, matImg_klimt_4_connexity);
vpImageConvert::convert(I_klimt, matImg_klimt_8_connexity);
// 4-connexity
t = vpTime::measureTimeMs();
cv::floodFill(matImg_klimt_4_connexity, cv::Point(seed_x, seed_y), cv::Scalar(255), 0, cv::Scalar(), cv::Scalar(),
4);
t = vpTime::measureTimeMs() - t;
std::cout << "OpenCV flood fill on Klimt image (4-connexity): " << t << " ms" << std::endl;
vpImage<unsigned char> I_klimt_flood_fill_4_connexity_check;
vpImageConvert::convert(matImg_klimt_4_connexity, I_klimt_flood_fill_4_connexity_check);
filename = vpIoTools::createFilePath(opath, "Klimt_flood_fill_4_connexity_opencv.pgm");
vpImageIo::write(I_klimt_flood_fill_4_connexity_check, filename);
// 8-connexity
t = vpTime::measureTimeMs();
cv::floodFill(matImg_klimt_8_connexity, cv::Point(seed_x, seed_y), cv::Scalar(255), 0, cv::Scalar(), cv::Scalar(),
8);
t = vpTime::measureTimeMs() - t;
std::cout << "OpenCV flood fill on Klimt image (8-connexity): " << t << " ms" << std::endl;
vpImage<unsigned char> I_klimt_flood_fill_8_connexity_check;
vpImageConvert::convert(matImg_klimt_8_connexity, I_klimt_flood_fill_8_connexity_check);
filename = vpIoTools::createFilePath(opath, "Klimt_flood_fill_8_connexity_opencv.pgm");
vpImageIo::write(I_klimt_flood_fill_8_connexity_check, filename);
// Check
std::cout << "\n(I_klimt_flood_fill_4_connexity == "
"I_klimt_flood_fill_4_connexity_check)? "
<< (I_klimt_flood_fill_4_connexity == I_klimt_flood_fill_4_connexity_check) << std::endl;
std::cout << "(I_klimt_flood_fill_8_connexity == "
"I_klimt_flood_fill_8_connexity_check)? "
<< (I_klimt_flood_fill_8_connexity == I_klimt_flood_fill_8_connexity_check) << std::endl;
if (I_klimt_flood_fill_4_connexity != I_klimt_flood_fill_4_connexity_check) {
throw vpException(vpException::fatalError, "(I_klimt_flood_fill_4_connexity != "
"I_klimt_flood_fill_4_connexity_check)");
}
if (I_klimt_flood_fill_8_connexity != I_klimt_flood_fill_8_connexity_check) {
throw vpException(vpException::fatalError, "(I_klimt_flood_fill_8_connexity != "
"I_klimt_flood_fill_8_connexity_check)");
}
#endif
std::cout << "\nTest flood fill is ok!" << std::endl;
return EXIT_SUCCESS;
} catch (const vpException &e) {
std::cerr << "Catch an exception: " << e.what() << std::endl;
return EXIT_FAILURE;
}
}
| gpl-2.0 |
antweb/bluez-old | profiles/time/manager.c | 1332 | /*
*
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2012 Nokia Corporation
* Copyright (C) 2012 Marcel Holtmann <[email protected]>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "adapter.h"
#include "manager.h"
#include "server.h"
struct btd_adapter_driver time_server_driver = {
.name = "gatt-time-server",
.probe = time_server_init,
.remove = time_server_exit,
};
int time_manager_init(void)
{
btd_register_adapter_driver(&time_server_driver);
return 0;
}
void time_manager_exit(void)
{
btd_unregister_adapter_driver(&time_server_driver);
}
| gpl-2.0 |
Kernelhacker/2.6.38.3-Adam | drivers/staging/ath6kl/os/linux/eeprom.c | 9203 | //------------------------------------------------------------------------------
// Copyright (c) 2004-2010 Atheros Communications Inc.
// All rights reserved.
//
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//
// Author(s): ="Atheros"
//------------------------------------------------------------------------------
#include "ar6000_drv.h"
#include "htc.h"
#include <linux/fs.h>
#include "AR6002/hw2.0/hw/gpio_reg.h"
#include "AR6002/hw2.0/hw/si_reg.h"
//
// defines
//
#define MAX_FILENAME 1023
#define EEPROM_WAIT_LIMIT 16
#define HOST_INTEREST_ITEM_ADDRESS(item) \
(AR6002_HOST_INTEREST_ITEM_ADDRESS(item))
#define EEPROM_SZ 768
/* soft mac */
#define ATH_MAC_LEN 6
#define ATH_SOFT_MAC_TMP_BUF_LEN 64
unsigned char mac_addr[ATH_MAC_LEN];
unsigned char soft_mac_tmp_buf[ATH_SOFT_MAC_TMP_BUF_LEN];
char *p_mac = NULL;
/* soft mac */
//
// static variables
//
static u8 eeprom_data[EEPROM_SZ];
static u32 sys_sleep_reg;
static struct hif_device *p_bmi_device;
//
// Functions
//
/* soft mac */
static int
wmic_ether_aton(const char *orig, u8 *eth)
{
const char *bufp;
int i;
i = 0;
for(bufp = orig; *bufp != '\0'; ++bufp) {
unsigned int val;
int h, l;
h = hex_to_bin(*bufp++);
if (h < 0) {
printk("%s: MAC value is invalid\n", __FUNCTION__);
break;
}
l = hex_to_bin(*bufp++);
if (l < 0) {
printk("%s: MAC value is invalid\n", __FUNCTION__);
break;
}
val = (h << 4) | l;
eth[i] = (unsigned char) (val & 0377);
if(++i == ATH_MAC_LEN) {
/* That's it. Any trailing junk? */
if (*bufp != '\0') {
return 0;
}
return 1;
}
if (*bufp != ':')
break;
}
return 0;
}
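/*
 * Write the soft MAC address into the EEPROM image at byte offset 10, then
 * recompute the 16-bit XOR checksum stored at offset 4: the checksum field is
 * cleared first, all 16-bit words of the image are XOR-ed together and the
 * complement of the result is written back.
 */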
static void
update_mac(unsigned char *eeprom, int size, unsigned char *macaddr)
{
int i;
u16 *ptr = (u16 *)(eeprom+4);
u16 checksum = 0;
memcpy(eeprom+10,macaddr,6);
*ptr = 0;
ptr = (u16 *)eeprom;
for (i=0; i<size; i+=2) {
checksum ^= *ptr++;
}
checksum = ~checksum;
ptr = (u16 *)(eeprom+4);
*ptr = checksum;
return;
}
/* soft mac */
/* Read a Target register and return its value. */
inline void
BMI_read_reg(u32 address, u32 *pvalue)
{
BMIReadSOCRegister(p_bmi_device, address, pvalue);
}
/* Write a value to a Target register. */
inline void
BMI_write_reg(u32 address, u32 value)
{
BMIWriteSOCRegister(p_bmi_device, address, value);
}
/* Read Target memory word and return its value. */
inline void
BMI_read_mem(u32 address, u32 *pvalue)
{
BMIReadMemory(p_bmi_device, address, (u8*)(pvalue), 4);
}
/* Write a word to a Target memory. */
inline void
BMI_write_mem(u32 address, u8 *p_data, u32 sz)
{
BMIWriteMemory(p_bmi_device, address, (u8*)(p_data), sz);
}
/*
* Enable and configure the Target's Serial Interface
* so we can access the EEPROM.
*/
static void
enable_SI(struct hif_device *p_device)
{
u32 regval;
printk("%s\n", __FUNCTION__);
p_bmi_device = p_device;
BMI_read_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, &sys_sleep_reg);
BMI_write_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, SYSTEM_SLEEP_DISABLE_SET(1)); //disable system sleep temporarily
    BMI_read_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, &regval);
regval &= ~CLOCK_CONTROL_SI0_CLK_MASK;
BMI_write_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, regval);
    BMI_read_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, &regval);
regval &= ~RESET_CONTROL_SI0_RST_MASK;
BMI_write_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, regval);
    BMI_read_reg(GPIO_BASE_ADDRESS+GPIO_PIN0_OFFSET, &regval);
regval &= ~GPIO_PIN0_CONFIG_MASK;
BMI_write_reg(GPIO_BASE_ADDRESS+GPIO_PIN0_OFFSET, regval);
    BMI_read_reg(GPIO_BASE_ADDRESS+GPIO_PIN1_OFFSET, &regval);
regval &= ~GPIO_PIN1_CONFIG_MASK;
BMI_write_reg(GPIO_BASE_ADDRESS+GPIO_PIN1_OFFSET, regval);
/* SI_CONFIG = 0x500a6; */
regval = SI_CONFIG_BIDIR_OD_DATA_SET(1) |
SI_CONFIG_I2C_SET(1) |
SI_CONFIG_POS_SAMPLE_SET(1) |
SI_CONFIG_INACTIVE_CLK_SET(1) |
SI_CONFIG_INACTIVE_DATA_SET(1) |
SI_CONFIG_DIVIDER_SET(6);
BMI_write_reg(SI_BASE_ADDRESS+SI_CONFIG_OFFSET, regval);
}
static void
disable_SI(void)
{
u32 regval;
printk("%s\n", __FUNCTION__);
BMI_write_reg(RTC_BASE_ADDRESS+RESET_CONTROL_OFFSET, RESET_CONTROL_SI0_RST_MASK);
    BMI_read_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, &regval);
regval |= CLOCK_CONTROL_SI0_CLK_MASK;
BMI_write_reg(RTC_BASE_ADDRESS+CLOCK_CONTROL_OFFSET, regval);//Gate SI0 clock
BMI_write_reg(RTC_BASE_ADDRESS+SYSTEM_SLEEP_OFFSET, sys_sleep_reg); //restore system sleep setting
}
/*
* Tell the Target to start an 8-byte read from EEPROM,
* putting the results in Target RX_DATA registers.
*/
static void
request_8byte_read(int offset)
{
u32 regval;
// printk("%s: request_8byte_read from offset 0x%x\n", __FUNCTION__, offset);
/* SI_TX_DATA0 = read from offset */
regval =(0xa1<<16)|
((offset & 0xff)<<8) |
(0xa0 | ((offset & 0xff00)>>7));
BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA0_OFFSET, regval);
regval = SI_CS_START_SET(1) |
SI_CS_RX_CNT_SET(8) |
SI_CS_TX_CNT_SET(3);
BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, regval);
}
/*
* Tell the Target to start a 4-byte write to EEPROM,
* writing values from Target TX_DATA registers.
*/
static void
request_4byte_write(int offset, u32 data)
{
u32 regval;
printk("%s: request_4byte_write (0x%x) to offset 0x%x\n", __FUNCTION__, data, offset);
/* SI_TX_DATA0 = write data to offset */
regval = ((data & 0xffff) <<16) |
((offset & 0xff)<<8) |
(0xa0 | ((offset & 0xff00)>>7));
BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA0_OFFSET, regval);
regval = data >> 16;
BMI_write_reg(SI_BASE_ADDRESS+SI_TX_DATA1_OFFSET, regval);
regval = SI_CS_START_SET(1) |
SI_CS_RX_CNT_SET(0) |
SI_CS_TX_CNT_SET(6);
BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, regval);
}
/*
* Check whether or not an EEPROM request that was started
* earlier has completed yet.
*/
static bool
request_in_progress(void)
{
u32 regval;
/* Wait for DONE_INT in SI_CS */
    BMI_read_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, &regval);
// printk("%s: request in progress SI_CS=0x%x\n", __FUNCTION__, regval);
if (regval & SI_CS_DONE_ERR_MASK) {
printk("%s: EEPROM signaled ERROR (0x%x)\n", __FUNCTION__, regval);
}
return (!(regval & SI_CS_DONE_INT_MASK));
}
/*
 * try to detect the type of EEPROM, 16-bit address or 8-bit address
*/
static void eeprom_type_detect(void)
{
u32 regval;
u8 i = 0;
request_8byte_read(0x100);
/* Wait for DONE_INT in SI_CS */
do{
        BMI_read_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, &regval);
if (regval & SI_CS_DONE_ERR_MASK) {
printk("%s: ERROR : address type was wrongly set\n", __FUNCTION__);
break;
}
if (i++ == EEPROM_WAIT_LIMIT) {
printk("%s: EEPROM not responding\n", __FUNCTION__);
}
} while(!(regval & SI_CS_DONE_INT_MASK));
}
/*
* Extract the results of a completed EEPROM Read request
* and return them to the caller.
*/
inline void
read_8byte_results(u32 *data)
{
/* Read SI_RX_DATA0 and SI_RX_DATA1 */
BMI_read_reg(SI_BASE_ADDRESS+SI_RX_DATA0_OFFSET, &data[0]);
BMI_read_reg(SI_BASE_ADDRESS+SI_RX_DATA1_OFFSET, &data[1]);
}
/*
* Wait for a previously started command to complete.
 * Timeout if the command takes "too long".
*/
static void
wait_for_eeprom_completion(void)
{
int i=0;
while (request_in_progress()) {
if (i++ == EEPROM_WAIT_LIMIT) {
printk("%s: EEPROM not responding\n", __FUNCTION__);
}
}
}
/*
* High-level function which starts an 8-byte read,
* waits for it to complete, and returns the result.
*/
static void
fetch_8bytes(int offset, u32 *data)
{
request_8byte_read(offset);
wait_for_eeprom_completion();
read_8byte_results(data);
/* Clear any pending intr */
BMI_write_reg(SI_BASE_ADDRESS+SI_CS_OFFSET, SI_CS_DONE_INT_MASK);
}
/*
* High-level function which starts a 4-byte write,
* and waits for it to complete.
*/
inline void
commit_4bytes(int offset, u32 data)
{
request_4byte_write(offset, data);
wait_for_eeprom_completion();
}
| gpl-2.0 |
miguelangelramirez/magento.dev | app/code/core/Mage/GoogleBase/Model/Resource/Attribute.php | 1413 | <?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_GoogleBase
* @copyright Copyright (c) 2006-2017 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
* Google Base Attributes resource model
*
* @deprecated after 1.5.1.0
* @category Mage
* @package Mage_GoogleBase
* @author Magento Core Team <[email protected]>
*/
class Mage_GoogleBase_Model_Resource_Attribute extends Mage_Core_Model_Resource_Db_Abstract
{
/**
* Resource initialization
*
*/
protected function _construct()
{
$this->_init('googlebase/attributes', 'id');
}
}
| gpl-2.0 |
bjrambo/nurigo | modules/epay/m.skins/default/paymentform.html | 3888 | <load target="css/style.css" />
<load target="css/btn.css" />
<load target="../../tpl/js/common.js" />
<div class="payment_wrapper">
{$form_data}
</div>
<div class="payment_wrapper">
<div class="payment_title">
		Payment Method
</div>
<form id="epay_form" method="post" action="./">
<input type="hidden" name="act" value="" />
<input type="hidden" name="mid" value="{$mid}" />
<input type="hidden" name="module" value="{$module}" />
<input type="hidden" name="cartnos" value="{$cartnos}" />
<input type="hidden" name="epay_module_srl" value="{$epay_module_srl}" />
<input type="hidden" name="order_srl" value="{$order_srl}" />
<input type="hidden" name="transaction_srl" value="{$transaction_srl}" />
<ul id="select_method">
{@$count=0}
<li loop="$payment_methods=>$key,$val">
<label for="{$key}"><input type="radio" name="payment_method" id="{$key}" data-mid="{$val->mid}" data-module="{$val->module}" data-act="{$val->act}" data-mode="{$val->mode}" value="{$val->payment_method}" checked="checked"|cond="$count==0"/>{$val->title}</label>
{@$count++}
</li>
</ul>
{@$count=0}
<div class="payment_info">
<p loop="$payment_methods=>$key,$val" id="guide_{$key}" style="display:none;"|cond="$count>0">
{$val->guide}
{@$count++}
</p>
</div>
<div class="btn_wrap">
<button type="submit" class="btn nbtn">결제하기</button>
</div>
</form>
</div>
<script>
(function($) {
jQuery(function($) {
$('#epay_form').submit(function() {
var mid = $('#select_method input[name=payment_method]:checked').attr('data-mid');
var module = $('#select_method input[name=payment_method]:checked').attr('data-module');
var act = $('#select_method input[name=payment_method]:checked').attr('data-act');
var mode = $('#select_method input[name=payment_method]:checked').attr('data-mode');
if (mode == 'ajax') {
var params = new Array();
var responses = ['error','message','tpl'];
exec_xml(module, act, params, function(ret_obj) {
var tpl = ret_obj.tpl.replace(/<enter>/g, '\n');
$('#payment_form').html(tpl);
inipay_submit('fo_insert_order');
}, responses);
return false;
} else {
if (mid) $('#epay_form input[name=mid]').val(mid);
if (module) $('#epay_form input[name=module]').val(module);
if (act) $('#epay_form input[name=act]').val(act);
}
copy_form('fo_insert_order', 'epay_form');
});
$('#epay_form input[name=payment_method]').click(function() {
var id = $(this).attr('id');
$('#epay_form .payment_info p').hide();
$('#epay_form .payment_info p#guide_'+id).show();
});
});
}) (jQuery);
</script>
| gpl-2.0 |
Lsty/ygopro-scripts | c81254059.lua | 2593 | --ワーム・クィーン
function c81254059.initial_effect(c)
--summon with 1 tribute
local e1=Effect.CreateEffect(c)
e1:SetDescription(aux.Stringid(81254059,0))
e1:SetProperty(EFFECT_FLAG_CANNOT_DISABLE+EFFECT_FLAG_UNCOPYABLE)
e1:SetType(EFFECT_TYPE_SINGLE)
e1:SetCode(EFFECT_SUMMON_PROC)
e1:SetCondition(c81254059.otcon)
e1:SetOperation(c81254059.otop)
e1:SetValue(SUMMON_TYPE_ADVANCE)
c:RegisterEffect(e1)
--special summon
local e2=Effect.CreateEffect(c)
e2:SetDescription(aux.Stringid(81254059,1))
e2:SetCategory(CATEGORY_SPECIAL_SUMMON)
e2:SetType(EFFECT_TYPE_IGNITION)
e2:SetRange(LOCATION_MZONE)
e2:SetCountLimit(1)
e2:SetCost(c81254059.spcost)
e2:SetTarget(c81254059.sptg)
e2:SetOperation(c81254059.spop)
c:RegisterEffect(e2)
end
function c81254059.cfilter(c,tp)
return c:IsSetCard(0x3e) and c:IsRace(RACE_REPTILE) and (c:IsControler(tp) or c:IsFaceup())
end
function c81254059.otcon(e,c)
if c==nil then return true end
local tp=c:GetControler()
local mg=Duel.GetMatchingGroup(c81254059.cfilter,tp,LOCATION_MZONE,LOCATION_MZONE,nil,tp)
return c:GetLevel()>6 and Duel.GetLocationCount(tp,LOCATION_MZONE)>-1
and Duel.GetTributeCount(c,mg)>0
end
function c81254059.otop(e,tp,eg,ep,ev,re,r,rp,c)
local mg=Duel.GetMatchingGroup(c81254059.cfilter,tp,LOCATION_MZONE,LOCATION_MZONE,nil,tp)
local sg=Duel.SelectTribute(tp,c,1,1,mg)
c:SetMaterial(sg)
Duel.Release(sg,REASON_SUMMON+REASON_MATERIAL)
end
function c81254059.costfilter(c,e,tp)
return c:IsSetCard(0x3e) and c:IsRace(RACE_REPTILE)
and Duel.IsExistingMatchingCard(c81254059.spfilter,tp,LOCATION_DECK,0,1,nil,e,tp,c:GetLevel())
end
function c81254059.spfilter(c,e,tp,lv)
return c:IsSetCard(0x3e) and c:IsRace(RACE_REPTILE) and c:GetLevel()<=lv
and c:IsCanBeSpecialSummoned(e,0,tp,false,false)
end
function c81254059.spcost(e,tp,eg,ep,ev,re,r,rp,chk)
if chk==0 then return Duel.CheckReleaseGroup(tp,c81254059.costfilter,1,nil,e,tp) end
local sg=Duel.SelectReleaseGroup(tp,c81254059.costfilter,1,1,nil,e,tp)
e:SetLabel(sg:GetFirst():GetLevel())
Duel.Release(sg,REASON_COST)
end
function c81254059.sptg(e,tp,eg,ep,ev,re,r,rp,chk)
if chk==0 then return Duel.GetLocationCount(tp,LOCATION_MZONE)>-1 end
Duel.SetOperationInfo(0,CATEGORY_SPECIAL_SUMMON,nil,1,tp,LOCATION_DECK)
end
function c81254059.spop(e,tp,eg,ep,ev,re,r,rp)
if Duel.GetLocationCount(tp,LOCATION_MZONE)<=0 then return end
Duel.Hint(HINT_SELECTMSG,tp,HINTMSG_SPSUMMON)
local g=Duel.SelectMatchingCard(tp,c81254059.spfilter,tp,LOCATION_DECK,0,1,1,nil,e,tp,e:GetLabel())
Duel.SpecialSummon(g,0,tp,tp,false,false,POS_FACEUP)
end
| gpl-2.0 |
jchuang1977/openwrt | package/lean/mt/drivers/mt_wifi/src/mt_wifi/include/txpwr/BFBackoffTable_3.h | 4982 | /* AUTO GEN PLEASE DO NOT MODIFY IT */
/* AUTO GEN PLEASE DO NOT MODIFY IT */
UCHAR BFBackoffvalue_3[] = "! Single SKU Max Power Table (unit is 1 dBm) ! 2.4G Channel "
"Band: 2.4G MaxPower_4T MaxPower_3T MaxPower_2T Ch1 30 30 30 "
"Ch2 30 30 30 Ch3 30 30 30 "
"Ch4 30 30 30 Ch5 30 30 30 "
"Ch6 30 30 30 Ch7 30 30 30 "
"Ch8 30 30 30 Ch9 30 30 30 "
"Ch10 30 30 30 Ch11 30 30 30 "
"Ch12 30 30 30 Ch13 30 30 30 "
"Ch14 30 30 30 "
"! 5G Channel Band: 5G MaxPower_4T MaxPower_3T MaxPower_2T "
"Ch184 30 30 30 Ch188 30 30 30 "
"Ch192 30 30 30 Ch196 30 30 30 "
"Ch8 30 30 30 Ch12 30 30 30 "
"Ch16 30 30 30 Ch36 30 30 30 "
"Ch40 30 30 30 Ch44 30 30 30 "
"Ch48 30 30 30 Ch52 30 30 30 "
"Ch56 30 30 30 Ch60 30 30 30 "
"Ch64 30 30 30 Ch68 30 30 30 "
"Ch72 30 30 30 Ch76 30 30 30 "
"Ch80 30 30 30 Ch84 30 30 30 "
"Ch88 30 30 30 Ch92 30 30 30 "
"Ch96 30 30 30 Ch100 30 30 30 "
"Ch104 30 30 30 Ch108 30 30 30 "
"Ch112 30 30 30 Ch116 30 30 30 "
"Ch120 30 30 30 Ch124 30 30 30 "
"Ch128 30 30 30 Ch132 30 30 30 "
"Ch136 30 30 30 Ch140 30 30 30 "
"Ch144 30 30 30 Ch149 30 30 30 "
"Ch153 30 30 30 Ch157 30 30 30 "
"Ch161 30 30 30 Ch165 30 30 30 "
"Ch169 30 30 30 Ch173 30 30 30 "
"Ch177 30 30 30 Ch181 30 30 30 "
" "
" "
" "
" "
;
| gpl-2.0 |
SeeyaSia/www | web/modules/contrib/chosen/js/chosen.js | 2592 | (function ($, Drupal) {
'use strict';
Drupal.behaviors.chosen = {
attach: function (context, settings) {
settings.chosen = settings.chosen || drupalSettings.chosen;
      // Prepare the selector; unwanted selectors are filtered out below.
var selector = settings.chosen.selector;
var options;
// Function to prepare all the options together for the chosen() call.
var getElementOptions = function (element) {
options = $.extend({}, settings.chosen.options);
// The width default option is considered the minimum width, so this
// must be evaluated for every option.
if (settings.chosen.minimum_width > 0) {
if ($(element).width() < settings.chosen.minimum_width) {
options.width = settings.chosen.minimum_width + 'px';
}
else {
options.width = $(element).width() + 'px';
}
}
// Some field widgets have cardinality, so we must respect that.
// @see chosen_pre_render_select()
if ($(element).attr('multiple') && $(element).data('cardinality')) {
options.max_selected_options = $(element).data('cardinality');
}
return options;
};
// Process elements that have opted-in for Chosen.
$('select.chosen-enable', context).once('chosen').each(function () {
options = getElementOptions(this);
$(this).chosen(options);
});
$(selector, context)
// Disabled on:
// - Field UI
// - WYSIWYG elements
// - Tabledrag weights
// - Elements that have opted-out of Chosen
// - Elements already processed by Chosen
.not('#field-ui-field-storage-add-form select, #entity-form-display-edit-form select, #entity-view-display-edit-form select, .wysiwyg, .draggable select[name$="[weight]"], .draggable select[name$="[position]"], .locale-translate-filter-form select, .chosen-disable, .chosen-processed')
.filter(function () {
// Filter out select widgets that do not meet the minimum number of
// options.
var minOptions = $(this).attr('multiple') ? settings.chosen.minimum_multiple : settings.chosen.minimum_single;
if (!minOptions) {
// Zero value means no minimum.
return true;
}
else {
return $(this).find('option').length >= minOptions;
}
})
.once('chosen').each(function () {
options = getElementOptions(this);
$(this).chosen(options);
});
}
};
})(jQuery, Drupal);
| gpl-2.0 |
rakeshkaundilya/phpmyadmin | libraries/parse_analyze.inc.php | 4562 | <?php
/* vim: set expandtab sw=4 ts=4 sts=4: */
/**
* Parse and analyse a SQL query
*
* @package PhpMyAdmin
*/
if (! defined('PHPMYADMIN')) {
exit;
}
/**
*
*/
$GLOBALS['unparsed_sql'] = $sql_query;
$parsed_sql = PMA_SQP_parse($sql_query);
$analyzed_sql = PMA_SQP_analyze($parsed_sql);
// for bug 780516: now that we use case insensitive preg_match
// or flags from the analyser, do not put back the reformatted query
// into $sql_query, to make this kind of query work without
// capitalizing keywords:
//
// CREATE TABLE SG_Persons (
// id int(10) unsigned NOT NULL auto_increment,
// first varchar(64) NOT NULL default '',
// PRIMARY KEY (`id`)
// )
// Fills some variables from the analysed SQL
// A table has to be created, renamed, dropped:
// the navigation panel should be reloaded
$reload = isset($analyzed_sql[0]['queryflags']['reload']);
// check for drop database
$drop_database = isset($analyzed_sql[0]['queryflags']['drop_database']);
// for the presence of EXPLAIN
$is_explain = isset($analyzed_sql[0]['queryflags']['is_explain']);
// for the presence of DELETE
$is_delete = isset($analyzed_sql[0]['queryflags']['is_delete']);
// for the presence of UPDATE, DELETE or INSERT|LOAD DATA|REPLACE
$is_affected = isset($analyzed_sql[0]['queryflags']['is_affected']);
// for the presence of REPLACE
$is_replace = isset($analyzed_sql[0]['queryflags']['is_replace']);
// for the presence of INSERT
$is_insert = isset($analyzed_sql[0]['queryflags']['is_insert']);
// for the presence of CHECK|ANALYZE|REPAIR|OPTIMIZE|CHECKSUM TABLE
$is_maint = isset($analyzed_sql[0]['queryflags']['is_maint']);
// for the presence of SHOW
$is_show = isset($analyzed_sql[0]['queryflags']['is_show']);
// for the presence of PROCEDURE ANALYSE
$is_analyse = isset($analyzed_sql[0]['queryflags']['is_analyse']);
// for the presence of INTO OUTFILE
$is_export = isset($analyzed_sql[0]['queryflags']['is_export']);
// for the presence of GROUP BY|HAVING|SELECT DISTINCT
$is_group = isset($analyzed_sql[0]['queryflags']['is_group']);
// for the presence of SUM|AVG|STD|STDDEV|MIN|MAX|BIT_OR|BIT_AND
$is_func = isset($analyzed_sql[0]['queryflags']['is_func']);
// for the presence of SELECT COUNT
$is_count = isset($analyzed_sql[0]['queryflags']['is_count']);
// check for a real SELECT ... FROM
$is_select = isset($analyzed_sql[0]['queryflags']['select_from']);
// the query contains a subquery
$is_subquery = isset($analyzed_sql[0]['queryflags']['is_subquery']);
// check for CALL
// Since multiple query execution is anyway handled,
// ignore the WHERE clause of the first sql statement
// which might contain a phrase like 'call '
if (isset($analyzed_sql[0]['queryflags']['is_procedure'])
&& empty($analyzed_sql[0]['where_clause'])
) {
$is_procedure = true;
} else {
$is_procedure = false;
}
// aggregates all the results into one array
$analyzed_sql_results = array(
"parsed_sql" => $parsed_sql,
"analyzed_sql" => $analyzed_sql,
"reload" => $reload,
"drop_database" => $drop_database,
"is_explain" => $is_explain,
"is_delete" => $is_delete,
"is_affected" => $is_affected,
"is_replace" => $is_replace,
"is_insert" => $is_insert,
"is_maint" => $is_maint,
"is_show" => $is_show,
"is_analyse" => $is_analyse,
"is_export" => $is_export,
"is_group" => $is_group,
"is_func" => $is_func,
"is_count" => $is_count,
"is_select" => $is_select,
"is_procedure" => $is_procedure,
"is_subquery" => $is_subquery
);
// If the query is a Select, extract the db and table names and modify
// $db and $table, to have correct page headers, links and left frame.
// db and table name may be enclosed with backquotes, db is optional,
// query may contain aliases.
/**
 * @todo if there is more than one table name in the Select:
* - do not extract the first table name
* - do not show a table name in the page header
 *       - do not display the sub-pages links
*/
if ($is_select) {
$prev_db = $db;
if (isset($analyzed_sql[0]['table_ref'][0]['table_true_name'])) {
$table = $analyzed_sql[0]['table_ref'][0]['table_true_name'];
}
if (isset($analyzed_sql[0]['table_ref'][0]['db'])
&& /*overload*/mb_strlen($analyzed_sql[0]['table_ref'][0]['db'])
) {
$db = $analyzed_sql[0]['table_ref'][0]['db'];
} else {
$db = $prev_db;
}
// Don't change reload, if we already decided to reload in import
if (empty($reload) && empty($GLOBALS['is_ajax_request'])) {
$reload = ($db == $prev_db) ? 0 : 1;
}
}
?>
| gpl-2.0 |
babycaseny/fuse-fuse | include/fuse_lowlevel.h | 49909 | /*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2007 Miklos Szeredi <[email protected]>
This program can be distributed under the terms of the GNU LGPLv2.
See the file COPYING.LIB.
*/
#ifndef _FUSE_LOWLEVEL_H_
#define _FUSE_LOWLEVEL_H_
/** @file
*
* Low level API
*
* IMPORTANT: you should define FUSE_USE_VERSION before including this
* header. To use the newest API define it to 26 (recommended for any
* new application), to use the old API define it to 24 (default) or
* 25
*/
#ifndef FUSE_USE_VERSION
#define FUSE_USE_VERSION 24
#endif
#include "fuse_common.h"
#include <utime.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/uio.h>
#ifdef __cplusplus
extern "C" {
#endif
/* ----------------------------------------------------------- *
* Miscellaneous definitions *
* ----------------------------------------------------------- */
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
/** Inode number type */
typedef uint64_t fuse_ino_t;
/** Request pointer type */
typedef struct fuse_req *fuse_req_t;
/**
* Session
*
* This provides hooks for processing requests, and exiting
*/
struct fuse_session;
/**
* Channel
*
* A communication channel, providing hooks for sending and receiving
* messages
*/
struct fuse_chan;
/** Directory entry parameters supplied to fuse_reply_entry() */
struct fuse_entry_param {
/** Unique inode number
*
 * In lookup, zero means negative entry (from version 2.5).
* Returning ENOENT also means negative entry, but by setting zero
* ino the kernel may cache negative entries for entry_timeout
* seconds.
*/
fuse_ino_t ino;
/** Generation number for this entry.
*
* If the file system will be exported over NFS, the
* ino/generation pairs need to be unique over the file
* system's lifetime (rather than just the mount time). So if
* the file system reuses an inode after it has been deleted,
* it must assign a new, previously unused generation number
* to the inode at the same time.
*
* The generation must be non-zero, otherwise FUSE will treat
* it as an error.
*
*/
uint64_t generation;
/** Inode attributes.
*
* Even if attr_timeout == 0, attr must be correct. For example,
* for open(), FUSE uses attr.st_size from lookup() to determine
* how many bytes to request. If this value is not correct,
* incorrect data will be returned.
*/
struct stat attr;
/** Validity timeout (in seconds) for the attributes */
double attr_timeout;
/** Validity timeout (in seconds) for the name */
double entry_timeout;
};
/** Additional context associated with requests */
struct fuse_ctx {
/** User ID of the calling process */
uid_t uid;
/** Group ID of the calling process */
gid_t gid;
/** Thread ID of the calling process */
pid_t pid;
/** Umask of the calling process (introduced in version 2.8) */
mode_t umask;
};
struct fuse_forget_data {
fuse_ino_t ino;
uint64_t nlookup;
};
/* 'to_set' flags in setattr */
#define FUSE_SET_ATTR_MODE (1 << 0)
#define FUSE_SET_ATTR_UID (1 << 1)
#define FUSE_SET_ATTR_GID (1 << 2)
#define FUSE_SET_ATTR_SIZE (1 << 3)
#define FUSE_SET_ATTR_ATIME (1 << 4)
#define FUSE_SET_ATTR_MTIME (1 << 5)
#define FUSE_SET_ATTR_ATIME_NOW (1 << 7)
#define FUSE_SET_ATTR_MTIME_NOW (1 << 8)
#define FUSE_SET_ATTR_CTIME (1 << 10)
/* ----------------------------------------------------------- *
* Request methods and replies *
* ----------------------------------------------------------- */
/**
* Low level filesystem operations
*
* Most of the methods (with the exception of init and destroy)
* receive a request handle (fuse_req_t) as their first argument.
* This handle must be passed to one of the specified reply functions.
*
* This may be done inside the method invocation, or after the call
* has returned. The request handle is valid until one of the reply
* functions is called.
*
* Other pointer arguments (name, fuse_file_info, etc) are not valid
* after the call has returned, so if they are needed later, their
* contents have to be copied.
*
* The filesystem sometimes needs to handle a return value of -ENOENT
 * from the reply function, which means that the request was
 * interrupted and the reply discarded. For example, if
 * fuse_reply_open() returns -ENOENT, the release method for
 * this file will not be called.
*/
struct fuse_lowlevel_ops {
/**
* Initialize filesystem
*
* Called before any other filesystem method
*
* There's no reply to this function
*
* @param userdata the user data passed to fuse_lowlevel_new()
*/
void (*init) (void *userdata, struct fuse_conn_info *conn);
/**
* Clean up filesystem
*
* Called on filesystem exit
*
* There's no reply to this function
*
* @param userdata the user data passed to fuse_lowlevel_new()
*/
void (*destroy) (void *userdata);
/**
* Look up a directory entry by name and get its attributes.
*
* Valid replies:
* fuse_reply_entry
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name the name to look up
*/
void (*lookup) (fuse_req_t req, fuse_ino_t parent, const char *name);
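	/*
	 * Illustrative sketch, not part of this header: a minimal lookup
	 * handler might fill a fuse_entry_param and answer with
	 * fuse_reply_entry(), or report a negative entry with ENOENT.
	 * my_stat_by_name() is a hypothetical helper of the example
	 * filesystem, not a libfuse function.
	 *
	 *   static void example_lookup(fuse_req_t req, fuse_ino_t parent,
	 *                              const char *name)
	 *   {
	 *       struct fuse_entry_param e;
	 *
	 *       memset(&e, 0, sizeof(e));
	 *       if (my_stat_by_name(parent, name, &e.attr) == -1) {
	 *           fuse_reply_err(req, ENOENT);
	 *           return;
	 *       }
	 *       e.ino = e.attr.st_ino;
	 *       e.generation = 1;
	 *       e.attr_timeout = 1.0;
	 *       e.entry_timeout = 1.0;
	 *       fuse_reply_entry(req, &e);
	 *   }
	 */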
/**
* Forget about an inode
*
* This function is called when the kernel removes an inode
* from its internal caches.
*
* The inode's lookup count increases by one for every call to
* fuse_reply_entry and fuse_reply_create. The nlookup parameter
* indicates by how much the lookup count should be decreased.
*
* Inodes with a non-zero lookup count may receive request from
* the kernel even after calls to unlink, rmdir or (when
* overwriting an existing file) rename. Filesystems must handle
* such requests properly and it is recommended to defer removal
* of the inode until the lookup count reaches zero. Calls to
 * unlink, rmdir or rename will be followed closely by forget
* unless the file or directory is open, in which case the
* kernel issues forget only after the release or releasedir
* calls.
*
* Note that if a file system will be exported over NFS the
* inodes lifetime must extend even beyond forget. See the
* generation field in struct fuse_entry_param above.
*
* On unmount the lookup count for all inodes implicitly drops
* to zero. It is not guaranteed that the file system will
* receive corresponding forget messages for the affected
* inodes.
*
* Valid replies:
* fuse_reply_none
*
* @param req request handle
* @param ino the inode number
* @param nlookup the number of lookups to forget
*/
void (*forget) (fuse_req_t req, fuse_ino_t ino, uint64_t nlookup);
/**
* Get file attributes
*
* Valid replies:
* fuse_reply_attr
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi for future use, currently always NULL
*/
void (*getattr) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
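	/*
	 * Illustrative sketch, not part of this header: a getattr handler
	 * typically fills a struct stat and replies with fuse_reply_attr().
	 * my_stat_by_ino() is a hypothetical helper and 1.0 is an example
	 * attribute timeout in seconds.
	 *
	 *   static void example_getattr(fuse_req_t req, fuse_ino_t ino,
	 *                               struct fuse_file_info *fi)
	 *   {
	 *       struct stat stbuf;
	 *
	 *       memset(&stbuf, 0, sizeof(stbuf));
	 *       if (my_stat_by_ino(ino, &stbuf) == -1)
	 *           fuse_reply_err(req, ENOENT);
	 *       else
	 *           fuse_reply_attr(req, &stbuf, 1.0);
	 *   }
	 */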
/**
* Set file attributes
*
* In the 'attr' argument only members indicated by the 'to_set'
* bitmask contain valid values. Other members contain undefined
* values.
*
* If the setattr was invoked from the ftruncate() system call
* under Linux kernel versions 2.6.15 or later, the fi->fh will
* contain the value set by the open method or will be undefined
* if the open method didn't set any value. Otherwise (not
* ftruncate call, or kernel version earlier than 2.6.15) the fi
* parameter will be NULL.
*
* Valid replies:
* fuse_reply_attr
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param attr the attributes
* @param to_set bit mask of attributes which should be set
* @param fi file information, or NULL
*
* Changed in version 2.5:
* file information filled in for ftruncate
*/
void (*setattr) (fuse_req_t req, fuse_ino_t ino, struct stat *attr,
int to_set, struct fuse_file_info *fi);
/**
* Read symbolic link
*
* Valid replies:
* fuse_reply_readlink
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
*/
void (*readlink) (fuse_req_t req, fuse_ino_t ino);
/**
* Create file node
*
* Create a regular file, character device, block device, fifo or
* socket node.
*
* Valid replies:
* fuse_reply_entry
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name to create
* @param mode file type and mode with which to create the new file
* @param rdev the device number (only valid if created file is a device)
*/
void (*mknod) (fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, dev_t rdev);
/**
* Create a directory
*
* Valid replies:
* fuse_reply_entry
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name to create
* @param mode with which to create the new file
*/
void (*mkdir) (fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode);
/**
* Remove a file
*
* If the file's inode's lookup count is non-zero, the file
* system is expected to postpone any removal of the inode
* until the lookup count reaches zero (see description of the
* forget function).
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name to remove
*/
void (*unlink) (fuse_req_t req, fuse_ino_t parent, const char *name);
/**
* Remove a directory
*
* If the directory's inode's lookup count is non-zero, the
* file system is expected to postpone any removal of the
* inode until the lookup count reaches zero (see description
* of the forget function).
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name to remove
*/
void (*rmdir) (fuse_req_t req, fuse_ino_t parent, const char *name);
/**
* Create a symbolic link
*
* Valid replies:
* fuse_reply_entry
* fuse_reply_err
*
* @param req request handle
* @param link the contents of the symbolic link
* @param parent inode number of the parent directory
* @param name to create
*/
void (*symlink) (fuse_req_t req, const char *link, fuse_ino_t parent,
const char *name);
/** Rename a file
*
* If the target exists it should be atomically replaced. If
* the target's inode's lookup count is non-zero, the file
* system is expected to postpone any removal of the inode
* until the lookup count reaches zero (see description of the
* forget function).
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the old parent directory
* @param name old name
* @param newparent inode number of the new parent directory
* @param newname new name
*/
void (*rename) (fuse_req_t req, fuse_ino_t parent, const char *name,
fuse_ino_t newparent, const char *newname,
unsigned int flags);
/**
* Create a hard link
*
* Valid replies:
* fuse_reply_entry
* fuse_reply_err
*
* @param req request handle
* @param ino the old inode number
* @param newparent inode number of the new parent directory
* @param newname new name to create
*/
void (*link) (fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
const char *newname);
/**
* Open a file
*
* Open flags (with the exception of O_CREAT, O_EXCL, O_NOCTTY and
* O_TRUNC) are available in fi->flags.
*
* Filesystem may store an arbitrary file handle (pointer, index,
 * etc) in fi->fh, and use this in all other file operations
* (read, write, flush, release, fsync).
*
* Filesystem may also implement stateless file I/O and not store
* anything in fi->fh.
*
* There are also some flags (direct_io, keep_cache) which the
* filesystem may set in fi, to change the way the file is opened.
* See fuse_file_info structure in <fuse_common.h> for more details.
*
* Valid replies:
* fuse_reply_open
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
*/
void (*open) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
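	/*
	 * Illustrative sketch, not part of this header: an open handler can
	 * stash its own handle in fi->fh and return it with fuse_reply_open().
	 * my_open_by_ino() and struct my_handle are hypothetical.
	 *
	 *   static void example_open(fuse_req_t req, fuse_ino_t ino,
	 *                            struct fuse_file_info *fi)
	 *   {
	 *       struct my_handle *h = my_open_by_ino(ino, fi->flags);
	 *
	 *       if (h == NULL) {
	 *           fuse_reply_err(req, EACCES);
	 *           return;
	 *       }
	 *       fi->fh = (uint64_t) (uintptr_t) h;
	 *       fuse_reply_open(req, fi);
	 *   }
	 */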
/**
* Read data
*
* Read should send exactly the number of bytes requested except
* on EOF or error, otherwise the rest of the data will be
* substituted with zeroes. An exception to this is when the file
* has been opened in 'direct_io' mode, in which case the return
* value of the read system call will reflect the return value of
* this operation.
*
* fi->fh will contain the value set by the open method, or will
* be undefined if the open method didn't set any value.
*
* Valid replies:
* fuse_reply_buf
* fuse_reply_iov
* fuse_reply_data
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param size number of bytes to read
* @param off offset to read from
* @param fi file information
*/
void (*read) (fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
struct fuse_file_info *fi);
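	/*
	 * Illustrative sketch, not part of this header: a read handler that
	 * serves data from an in-memory buffer and clamps the reply to the
	 * requested window; file_contents and file_size are hypothetical
	 * globals of the example filesystem.
	 *
	 *   static void example_read(fuse_req_t req, fuse_ino_t ino, size_t size,
	 *                            off_t off, struct fuse_file_info *fi)
	 *   {
	 *       if (off >= (off_t) file_size) {
	 *           fuse_reply_buf(req, NULL, 0);   // end of file
	 *           return;
	 *       }
	 *       if (size > file_size - off)
	 *           size = file_size - off;
	 *       fuse_reply_buf(req, file_contents + off, size);
	 *   }
	 */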
/**
* Write data
*
* Write should return exactly the number of bytes requested
* except on error. An exception to this is when the file has
* been opened in 'direct_io' mode, in which case the return value
* of the write system call will reflect the return value of this
* operation.
*
* fi->fh will contain the value set by the open method, or will
* be undefined if the open method didn't set any value.
*
* Valid replies:
* fuse_reply_write
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param buf data to write
* @param size number of bytes to write
* @param off offset to write to
* @param fi file information
*/
void (*write) (fuse_req_t req, fuse_ino_t ino, const char *buf,
size_t size, off_t off, struct fuse_file_info *fi);
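	/*
	 * Illustrative sketch, not part of this header: a write handler
	 * acknowledges the number of bytes it accepted with fuse_reply_write().
	 * my_store_bytes() is a hypothetical helper of the example filesystem.
	 *
	 *   static void example_write(fuse_req_t req, fuse_ino_t ino,
	 *                             const char *buf, size_t size, off_t off,
	 *                             struct fuse_file_info *fi)
	 *   {
	 *       ssize_t res = my_store_bytes(fi->fh, buf, size, off);
	 *
	 *       if (res < 0)
	 *           fuse_reply_err(req, EIO);
	 *       else
	 *           fuse_reply_write(req, (size_t) res);
	 *   }
	 */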
/**
* Flush method
*
* This is called on each close() of the opened file.
*
* Since file descriptors can be duplicated (dup, dup2, fork), for
* one open call there may be many flush calls.
*
* Filesystems shouldn't assume that flush will always be called
 * after some writes, or that it will be called at all.
*
* fi->fh will contain the value set by the open method, or will
* be undefined if the open method didn't set any value.
*
* NOTE: the name of the method is misleading, since (unlike
* fsync) the filesystem is not forced to flush pending writes.
 * One reason to flush data is if the filesystem wants to return
* write errors.
*
* If the filesystem supports file locking operations (setlk,
* getlk) it should remove all locks belonging to 'fi->owner'.
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
*/
void (*flush) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
/**
* Release an open file
*
* Release is called when there are no more references to an open
* file: all file descriptors are closed and all memory mappings
* are unmapped.
*
* For every open call there will be exactly one release call.
*
* The filesystem may reply with an error, but error values are
* not returned to close() or munmap() which triggered the
* release.
*
* fi->fh will contain the value set by the open method, or will
* be undefined if the open method didn't set any value.
* fi->flags will contain the same flags as for open.
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
*/
void (*release) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
/**
* Synchronize file contents
*
* If the datasync parameter is non-zero, then only the user data
* should be flushed, not the meta data.
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param datasync flag indicating if only data should be flushed
* @param fi file information
*/
void (*fsync) (fuse_req_t req, fuse_ino_t ino, int datasync,
struct fuse_file_info *fi);
/**
* Open a directory
*
* Filesystem may store an arbitrary file handle (pointer, index,
* etc) in fi->fh, and use this in other all other directory
* stream operations (readdir, releasedir, fsyncdir).
*
* Filesystem may also implement stateless directory I/O and not
* store anything in fi->fh, though that makes it impossible to
* implement standard conforming directory stream operations in
* case the contents of the directory can change between opendir
* and releasedir.
*
* Valid replies:
* fuse_reply_open
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
*/
void (*opendir) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
/**
* Read directory
*
* Send a buffer filled using fuse_add_direntry(), with size not
* exceeding the requested size. Send an empty buffer on end of
* stream.
*
* fi->fh will contain the value set by the opendir method, or
* will be undefined if the opendir method didn't set any value.
*
* Returning a directory entry from readdir() does not affect
* its lookup count.
*
* Valid replies:
* fuse_reply_buf
* fuse_reply_data
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param size maximum number of bytes to send
* @param off offset to continue reading the directory stream
* @param fi file information
*/
void (*readdir) (fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
struct fuse_file_info *fi);
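	/*
	 * Illustrative sketch, not part of this header: a readdir handler
	 * packs entries with fuse_add_direntry() until the requested size is
	 * reached and then replies with the partial buffer; only st_ino and
	 * st_mode of the passed stat are looked at. my_next_entry() and the
	 * fixed 4096-byte buffer are assumptions of the example.
	 *
	 *   char buf[4096];
	 *   size_t bufsize = size < sizeof(buf) ? size : sizeof(buf);
	 *   size_t pos = 0;
	 *   size_t entsize;
	 *   struct stat st;
	 *   const char *name;
	 *   off_t next;
	 *
	 *   while ((name = my_next_entry(ino, off, &st, &next)) != NULL) {
	 *       entsize = fuse_add_direntry(req, buf + pos, bufsize - pos,
	 *                                   name, &st, next);
	 *       if (entsize > bufsize - pos)
	 *           break;               // entry does not fit any more
	 *       pos += entsize;
	 *       off = next;
	 *   }
	 *   fuse_reply_buf(req, buf, pos);
	 */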
/**
* Release an open directory
*
* For every opendir call there will be exactly one releasedir
* call.
*
* fi->fh will contain the value set by the opendir method, or
* will be undefined if the opendir method didn't set any value.
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
*/
void (*releasedir) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi);
/**
* Synchronize directory contents
*
* If the datasync parameter is non-zero, then only the directory
* contents should be flushed, not the meta data.
*
* fi->fh will contain the value set by the opendir method, or
* will be undefined if the opendir method didn't set any value.
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param datasync flag indicating if only data should be flushed
* @param fi file information
*/
void (*fsyncdir) (fuse_req_t req, fuse_ino_t ino, int datasync,
struct fuse_file_info *fi);
/**
* Get file system statistics
*
* Valid replies:
* fuse_reply_statfs
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number, zero means "undefined"
*/
void (*statfs) (fuse_req_t req, fuse_ino_t ino);
/**
* Set an extended attribute
*
* Valid replies:
* fuse_reply_err
*/
void (*setxattr) (fuse_req_t req, fuse_ino_t ino, const char *name,
const char *value, size_t size, int flags);
/**
* Get an extended attribute
*
* If size is zero, the size of the value should be sent with
* fuse_reply_xattr.
*
* If the size is non-zero, and the value fits in the buffer, the
* value should be sent with fuse_reply_buf.
*
* If the size is too small for the value, the ERANGE error should
* be sent.
*
* Valid replies:
* fuse_reply_buf
* fuse_reply_data
* fuse_reply_xattr
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param name of the extended attribute
* @param size maximum size of the value to send
*/
void (*getxattr) (fuse_req_t req, fuse_ino_t ino, const char *name,
size_t size);
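	/*
	 * Illustrative sketch, not part of this header, of the size probing
	 * protocol described above; my_get_xattr() is a hypothetical helper
	 * returning the attribute value and its length.
	 *
	 *   size_t len;
	 *   const char *value = my_get_xattr(ino, name, &len);
	 *
	 *   if (value == NULL)
	 *       fuse_reply_err(req, ENODATA);
	 *   else if (size == 0)
	 *       fuse_reply_xattr(req, len);      // report the required size
	 *   else if (size < len)
	 *       fuse_reply_err(req, ERANGE);     // caller's buffer too small
	 *   else
	 *       fuse_reply_buf(req, value, len);
	 */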
/**
* List extended attribute names
*
* If size is zero, the total size of the attribute list should be
* sent with fuse_reply_xattr.
*
* If the size is non-zero, and the null character separated
* attribute list fits in the buffer, the list should be sent with
* fuse_reply_buf.
*
* If the size is too small for the list, the ERANGE error should
* be sent.
*
* Valid replies:
* fuse_reply_buf
* fuse_reply_data
* fuse_reply_xattr
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param size maximum size of the list to send
*/
void (*listxattr) (fuse_req_t req, fuse_ino_t ino, size_t size);
/**
* Remove an extended attribute
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param name of the extended attribute
*/
void (*removexattr) (fuse_req_t req, fuse_ino_t ino, const char *name);
/**
* Check file access permissions
*
* This will be called for the access() system call. If the
* 'default_permissions' mount option is given, this method is not
* called.
*
* This method is not called under Linux kernel versions 2.4.x
*
* Introduced in version 2.5
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param mask requested access mode
*/
void (*access) (fuse_req_t req, fuse_ino_t ino, int mask);
/**
* Create and open a file
*
* If the file does not exist, first create it with the specified
* mode, and then open it.
*
* Open flags (with the exception of O_NOCTTY) are available in
* fi->flags.
*
* Filesystem may store an arbitrary file handle (pointer, index,
 * etc) in fi->fh, and use this in all other file operations
* (read, write, flush, release, fsync).
*
* There are also some flags (direct_io, keep_cache) which the
* filesystem may set in fi, to change the way the file is opened.
* See fuse_file_info structure in <fuse_common.h> for more details.
*
* If this method is not implemented or under Linux kernel
* versions earlier than 2.6.15, the mknod() and open() methods
* will be called instead.
*
* Introduced in version 2.5
*
* Valid replies:
* fuse_reply_create
* fuse_reply_err
*
* @param req request handle
* @param parent inode number of the parent directory
* @param name to create
* @param mode file type and mode with which to create the new file
* @param fi file information
*/
void (*create) (fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, struct fuse_file_info *fi);
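	/*
	 * Illustrative sketch, not part of this header: create combines mknod
	 * and open, so the reply carries both the new entry and the file info.
	 * my_create_file() is a hypothetical helper of the example filesystem.
	 *
	 *   struct fuse_entry_param e;
	 *
	 *   memset(&e, 0, sizeof(e));
	 *   if (my_create_file(parent, name, mode, &e.attr) == -1) {
	 *       fuse_reply_err(req, EACCES);
	 *       return;
	 *   }
	 *   e.ino = e.attr.st_ino;
	 *   e.generation = 1;
	 *   e.attr_timeout = e.entry_timeout = 1.0;
	 *   fuse_reply_create(req, &e, fi);
	 */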
/**
* Test for a POSIX file lock
*
* Introduced in version 2.6
*
* Valid replies:
* fuse_reply_lock
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
* @param lock the region/type to test
*/
void (*getlk) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi, struct flock *lock);
/**
* Acquire, modify or release a POSIX file lock
*
* For POSIX threads (NPTL) there's a 1-1 relation between pid and
* owner, but otherwise this is not always the case. For checking
* lock ownership, 'fi->owner' must be used. The l_pid field in
* 'struct flock' should only be used to fill in this field in
* getlk().
*
* Note: if the locking methods are not implemented, the kernel
* will still allow file locking to work locally. Hence these are
* only interesting for network filesystems and similar.
*
* Introduced in version 2.6
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
* @param lock the region/type to set
* @param sleep locking operation may sleep
*/
void (*setlk) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi,
struct flock *lock, int sleep);
/**
* Map block index within file to block index within device
*
* Note: This makes sense only for block device backed filesystems
* mounted with the 'blkdev' option
*
* Introduced in version 2.6
*
* Valid replies:
* fuse_reply_bmap
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param blocksize unit of block index
* @param idx block index within file
*/
void (*bmap) (fuse_req_t req, fuse_ino_t ino, size_t blocksize,
uint64_t idx);
/**
* Ioctl
*
* Note: For unrestricted ioctls (not allowed for FUSE
* servers), data in and out areas can be discovered by giving
* iovs and setting FUSE_IOCTL_RETRY in @flags. For
* restricted ioctls, kernel prepares in/out data area
* according to the information encoded in cmd.
*
* Introduced in version 2.8
*
* Valid replies:
* fuse_reply_ioctl_retry
* fuse_reply_ioctl
* fuse_reply_ioctl_iov
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param cmd ioctl command
* @param arg ioctl argument
* @param fi file information
* @param flags for FUSE_IOCTL_* flags
* @param in_buf data fetched from the caller
* @param in_bufsz number of fetched bytes
* @param out_bufsz maximum size of output data
*/
void (*ioctl) (fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
struct fuse_file_info *fi, unsigned flags,
const void *in_buf, size_t in_bufsz, size_t out_bufsz);
/**
* Poll for IO readiness
*
* Introduced in version 2.8
*
* Note: If ph is non-NULL, the client should notify
* when IO readiness events occur by calling
 * fuse_lowlevel_notify_poll() with the specified ph.
*
* Regardless of the number of times poll with a non-NULL ph
* is received, single notification is enough to clear all.
* Notifying more times incurs overhead but doesn't harm
* correctness.
*
* The callee is responsible for destroying ph with
* fuse_pollhandle_destroy() when no longer in use.
*
* Valid replies:
* fuse_reply_poll
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
* @param ph poll handle to be used for notification
*/
void (*poll) (fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi,
struct fuse_pollhandle *ph);
/**
* Write data made available in a buffer
*
* This is a more generic version of the ->write() method. If
* FUSE_CAP_SPLICE_READ is set in fuse_conn_info.want and the
* kernel supports splicing from the fuse device, then the
* data will be made available in pipe for supporting zero
* copy data transfer.
*
* Introduced in version 2.9
*
* Valid replies:
* fuse_reply_write
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param bufv buffer containing the data
* @param off offset to write to
* @param fi file information
*/
void (*write_buf) (fuse_req_t req, fuse_ino_t ino,
struct fuse_bufvec *bufv, off_t off,
struct fuse_file_info *fi);
/**
* Callback function for the retrieve request
*
* Introduced in version 2.9
*
* Valid replies:
* fuse_reply_none
*
* @param req request handle
* @param cookie user data supplied to fuse_lowlevel_notify_retrieve()
* @param ino the inode number supplied to fuse_lowlevel_notify_retrieve()
* @param offset the offset supplied to fuse_lowlevel_notify_retrieve()
* @param bufv the buffer containing the returned data
*/
void (*retrieve_reply) (fuse_req_t req, void *cookie, fuse_ino_t ino,
off_t offset, struct fuse_bufvec *bufv);
/**
* Forget about multiple inodes
*
* See description of the forget function for more
* information.
*
* Introduced in version 2.9
*
* Valid replies:
* fuse_reply_none
*
* @param req request handle
*/
void (*forget_multi) (fuse_req_t req, size_t count,
struct fuse_forget_data *forgets);
/**
* Acquire, modify or release a BSD file lock
*
* Note: if the locking methods are not implemented, the kernel
* will still allow file locking to work locally. Hence these are
* only interesting for network filesystems and similar.
*
* Introduced in version 2.9
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param fi file information
* @param op the locking operation, see flock(2)
*/
void (*flock) (fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi, int op);
/**
* Allocate requested space. If this function returns success then
* subsequent writes to the specified range shall not fail due to the lack
* of free space on the file system storage media.
*
* Introduced in version 2.9
*
* Valid replies:
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param offset starting point for allocated region
* @param length size of allocated region
* @param mode determines the operation to be performed on the given range,
* see fallocate(2)
*/
void (*fallocate) (fuse_req_t req, fuse_ino_t ino, int mode,
off_t offset, off_t length, struct fuse_file_info *fi);
/**
* Read directory with attributes
*
* Send a buffer filled using fuse_add_direntry_plus(), with size not
* exceeding the requested size. Send an empty buffer on end of
* stream.
*
* fi->fh will contain the value set by the opendir method, or
* will be undefined if the opendir method didn't set any value.
*
* In contrast to readdir() (which does not affect the lookup counts),
* the lookup count of every entry returned by readdirplus(), except "."
* and "..", is incremented by one.
*
* Introduced in version 3.0
*
* Valid replies:
* fuse_reply_buf
* fuse_reply_data
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param size maximum number of bytes to send
* @param off offset to continue reading the directory stream
* @param fi file information
*/
void (*readdirplus) (fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
struct fuse_file_info *fi);
};
/**
* Reply with an error code or success
*
* Possible requests:
* all except forget
*
* unlink, rmdir, rename, flush, release, fsync, fsyncdir, setxattr,
* removexattr and setlk may send a zero code
*
* @param req request handle
* @param err the positive error value, or zero for success
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_err(fuse_req_t req, int err);
/**
* Don't send reply
*
* Possible requests:
* forget
*
* @param req request handle
*/
void fuse_reply_none(fuse_req_t req);
/**
* Reply with a directory entry
*
* Possible requests:
* lookup, mknod, mkdir, symlink, link
*
* Side effects:
* increments the lookup count on success
*
* @param req request handle
* @param e the entry parameters
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e);
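/*
 * Illustrative sketch: a minimal lookup handler built around
 * fuse_reply_entry().  The fs_stat() helper that fills in the child's
 * attributes is an assumption of this sketch.
 *
 *   static void xmp_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
 *   {
 *       struct fuse_entry_param e;
 *       memset(&e, 0, sizeof(e));
 *       if (fs_stat(parent, name, &e.attr) != 0) {
 *           fuse_reply_err(req, ENOENT);
 *           return;
 *       }
 *       e.ino = e.attr.st_ino;
 *       e.attr_timeout = 1.0;
 *       e.entry_timeout = 1.0;
 *       fuse_reply_entry(req, &e);
 *   }
 */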
/**
* Reply with a directory entry and open parameters
*
* currently the following members of 'fi' are used:
* fh, direct_io, keep_cache
*
* Possible requests:
* create
*
* Side effects:
* increments the lookup count on success
*
* @param req request handle
* @param e the entry parameters
* @param fi file information
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
const struct fuse_file_info *fi);
/**
* Reply with attributes
*
* Possible requests:
* getattr, setattr
*
* @param req request handle
* @param attr the attributes
* @param attr_timeout validity timeout (in seconds) for the attributes
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
double attr_timeout);
/**
* Reply with the contents of a symbolic link
*
* Possible requests:
* readlink
*
* @param req request handle
* @param link symbolic link contents
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_readlink(fuse_req_t req, const char *link);
/**
* Reply with open parameters
*
* currently the following members of 'fi' are used:
* fh, direct_io, keep_cache
*
* Possible requests:
* open, opendir
*
* @param req request handle
* @param fi file information
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *fi);
/**
* Reply with number of bytes written
*
* Possible requests:
* write
*
* @param req request handle
* @param count the number of bytes written
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_write(fuse_req_t req, size_t count);
/**
* Reply with data
*
* Possible requests:
* read, readdir, getxattr, listxattr
*
* @param req request handle
* @param buf buffer containing data
* @param size the size of data in bytes
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size);
/**
* Reply with data copied/moved from buffer(s)
*
* Possible requests:
* read, readdir, getxattr, listxattr
*
* Side effects:
* when used to return data from a readdirplus() (but not readdir())
* call, increments the lookup count of each returned entry by one
* on success.
*
* @param req request handle
* @param bufv buffer vector
* @param flags flags controlling the copy
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
enum fuse_buf_copy_flags flags);
/**
* Reply with data vector
*
* Possible requests:
* read, readdir, getxattr, listxattr
*
* @param req request handle
* @param iov the vector containing the data
* @param count the size of vector
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count);
/**
* Reply with filesystem statistics
*
* Possible requests:
* statfs
*
* @param req request handle
* @param stbuf filesystem statistics
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf);
/**
* Reply with needed buffer size
*
* Possible requests:
* getxattr, listxattr
*
* @param req request handle
* @param count the buffer size needed in bytes
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_xattr(fuse_req_t req, size_t count);
/**
* Reply with file lock information
*
* Possible requests:
* getlk
*
* @param req request handle
* @param lock the lock information
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_lock(fuse_req_t req, const struct flock *lock);
/**
* Reply with block index
*
* Possible requests:
* bmap
*
* @param req request handle
* @param idx block index within device
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_bmap(fuse_req_t req, uint64_t idx);
/* ----------------------------------------------------------- *
* Filling a buffer in readdir *
* ----------------------------------------------------------- */
/**
* Add a directory entry to the buffer
*
* Buffer needs to be large enough to hold the entry. If it's not,
* then the entry is not filled in but the size of the entry is still
* returned. The caller can check this by comparing the bufsize
* parameter with the returned entry size. If the entry size is
* larger than the buffer size, the operation failed.
*
* From the 'stbuf' argument the st_ino field and bits 12-15 of the
* st_mode field are used. The other fields are ignored.
*
* Note: offsets do not necessarily represent physical offsets, and
 * could be any marker that enables the implementation to find a
* specific point in the directory stream.
*
* @param req request handle
* @param buf the point where the new entry will be added to the buffer
* @param bufsize remaining size of the buffer
* @param name the name of the entry
* @param stbuf the file attributes
* @param off the offset of the next entry
* @return the space needed for the entry
*/
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
const char *name, const struct stat *stbuf,
off_t off);
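/*
 * Illustrative sketch: filling a readdir buffer with fuse_add_direntry().
 * The fixed name table and the missing allocation checks are
 * simplifications of this sketch.
 *
 *   static void xmp_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
 *                           off_t off, struct fuse_file_info *fi)
 *   {
 *       static const char *names[] = { ".", "..", "hello" };
 *       char *buf = calloc(1, size);
 *       size_t used = 0;
 *       off_t i;
 *       (void) ino; (void) fi;
 *       for (i = off; i < 3; i++) {
 *           struct stat st;
 *           size_t entsize;
 *           memset(&st, 0, sizeof(st));
 *           st.st_ino = i + 1;
 *           entsize = fuse_add_direntry(req, buf + used, size - used,
 *                                       names[i], &st, i + 1);
 *           if (entsize > size - used)
 *               break;  // entry did not fit, send what we have so far
 *           used += entsize;
 *       }
 *       fuse_reply_buf(req, buf, used);
 *       free(buf);
 *   }
 */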
/**
* Add a directory entry to the buffer with the attributes
*
* Buffer needs to be large enough to hold the entry. If it's not,
* then the entry is not filled in but the size of the entry is still
* returned. The caller can check this by comparing the bufsize
* parameter with the returned entry size. If the entry size is
* larger than the buffer size, the operation failed.
*
* From the 'stbuf' argument the st_ino field and bits 12-15 of the
* st_mode field are used. The other fields are ignored.
*
* Note: offsets do not necessarily represent physical offsets, and
 * could be any marker that enables the implementation to find a
* specific point in the directory stream.
*
* @param req request handle
* @param buf the point where the new entry will be added to the buffer
* @param bufsize remaining size of the buffer
* @param name the name of the entry
* @param e the directory entry
* @param off the offset of the next entry
* @return the space needed for the entry
*/
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
const char *name,
const struct fuse_entry_param *e, off_t off);
/**
* Reply to ask for data fetch and output buffer preparation. ioctl
* will be retried with the specified input data fetched and output
* buffer prepared.
*
* Possible requests:
* ioctl
*
* @param req request handle
* @param in_iov iovec specifying data to fetch from the caller
* @param in_count number of entries in in_iov
* @param out_iov iovec specifying addresses to write output to
* @param out_count number of entries in out_iov
* @return zero for success, -errno for failure to send reply
*/
int fuse_reply_ioctl_retry(fuse_req_t req,
const struct iovec *in_iov, size_t in_count,
const struct iovec *out_iov, size_t out_count);
/**
* Reply to finish ioctl
*
* Possible requests:
* ioctl
*
* @param req request handle
* @param result result to be passed to the caller
* @param buf buffer containing output data
* @param size length of output data
*/
int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size);
/**
* Reply to finish ioctl with iov buffer
*
* Possible requests:
* ioctl
*
* @param req request handle
* @param result result to be passed to the caller
* @param iov the vector containing the data
* @param count the size of vector
*/
int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
int count);
/**
* Reply with poll result event mask
*
* @param req request handle
* @param revents poll result event mask
*/
int fuse_reply_poll(fuse_req_t req, unsigned revents);
/* ----------------------------------------------------------- *
* Notification *
* ----------------------------------------------------------- */
/**
* Notify IO readiness event
*
* For more information, please read comment for poll operation.
*
* @param ph poll handle to notify IO readiness event for
*/
int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph);
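/*
 * Illustrative sketch: a poll handler that keeps the most recent poll handle
 * and notifies it once data becomes readable.  The names fs_ph/fs_ready and
 * the use of POLLIN from <poll.h> are assumptions of this sketch.
 *
 *   static struct fuse_pollhandle *fs_ph;
 *   static int fs_ready;
 *
 *   static void xmp_poll(fuse_req_t req, fuse_ino_t ino,
 *                        struct fuse_file_info *fi, struct fuse_pollhandle *ph)
 *   {
 *       (void) ino; (void) fi;
 *       if (ph) {
 *           if (fs_ph)
 *               fuse_pollhandle_destroy(fs_ph);
 *           fs_ph = ph;
 *       }
 *       fuse_reply_poll(req, fs_ready ? POLLIN : 0);
 *   }
 *
 *   // Later, when data becomes readable:
 *   //   fs_ready = 1;
 *   //   if (fs_ph) fuse_lowlevel_notify_poll(fs_ph);
 */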
/**
* Notify to invalidate cache for an inode
*
* @param ch the channel through which to send the invalidation
* @param ino the inode number
* @param off the offset in the inode where to start invalidating
* or negative to invalidate attributes only
* @param len the amount of cache to invalidate or 0 for all
* @return zero for success, -errno for failure
*/
int fuse_lowlevel_notify_inval_inode(struct fuse_chan *ch, fuse_ino_t ino,
off_t off, off_t len);
/**
* Notify to invalidate parent attributes and the dentry matching
* parent/name
*
* To avoid a deadlock don't call this function from a filesystem operation and
* don't call it with a lock held that can also be held by a filesystem
* operation.
*
* @param ch the channel through which to send the invalidation
* @param parent inode number
* @param name file name
* @param namelen strlen() of file name
* @return zero for success, -errno for failure
*/
int fuse_lowlevel_notify_inval_entry(struct fuse_chan *ch, fuse_ino_t parent,
const char *name, size_t namelen);
/**
* Notify to invalidate parent attributes and delete the dentry matching
* parent/name if the dentry's inode number matches child (otherwise it
* will invalidate the matching dentry).
*
* To avoid a deadlock don't call this function from a filesystem operation and
* don't call it with a lock held that can also be held by a filesystem
* operation.
*
* @param ch the channel through which to send the notification
* @param parent inode number
* @param child inode number
* @param name file name
* @param namelen strlen() of file name
* @return zero for success, -errno for failure
*/
int fuse_lowlevel_notify_delete(struct fuse_chan *ch,
fuse_ino_t parent, fuse_ino_t child,
const char *name, size_t namelen);
/**
* Store data to the kernel buffers
*
* Synchronously store data in the kernel buffers belonging to the
* given inode. The stored data is marked up-to-date (no read will be
* performed against it, unless it's invalidated or evicted from the
* cache).
*
* If the stored data overflows the current file size, then the size
* is extended, similarly to a write(2) on the filesystem.
*
* If this function returns an error, then the store wasn't fully
* completed, but it may have been partially completed.
*
* @param ch the channel through which to send the invalidation
* @param ino the inode number
* @param offset the starting offset into the file to store to
* @param bufv buffer vector
* @param flags flags controlling the copy
* @return zero for success, -errno for failure
*/
int fuse_lowlevel_notify_store(struct fuse_chan *ch, fuse_ino_t ino,
off_t offset, struct fuse_bufvec *bufv,
enum fuse_buf_copy_flags flags);
/**
* Retrieve data from the kernel buffers
*
* Retrieve data in the kernel buffers belonging to the given inode.
* If successful then the retrieve_reply() method will be called with
* the returned data.
*
* Only present pages are returned in the retrieve reply. Retrieving
* stops when it finds a non-present page and only data prior to that is
* returned.
*
* If this function returns an error, then the retrieve will not be
* completed and no reply will be sent.
*
* This function doesn't change the dirty state of pages in the kernel
* buffer. For dirty pages the write() method will be called
* regardless of having been retrieved previously.
*
* @param ch the channel through which to send the invalidation
* @param ino the inode number
* @param size the number of bytes to retrieve
* @param offset the starting offset into the file to retrieve from
* @param cookie user data to supply to the reply callback
* @return zero for success, -errno for failure
*/
int fuse_lowlevel_notify_retrieve(struct fuse_chan *ch, fuse_ino_t ino,
size_t size, off_t offset, void *cookie);
/* ----------------------------------------------------------- *
* Utility functions *
* ----------------------------------------------------------- */
/**
* Get the userdata from the request
*
* @param req request handle
* @return the user data passed to fuse_lowlevel_new()
*/
void *fuse_req_userdata(fuse_req_t req);
/**
* Get the context from the request
*
* The pointer returned by this function will only be valid for the
* request's lifetime
*
* @param req request handle
* @return the context structure
*/
const struct fuse_ctx *fuse_req_ctx(fuse_req_t req);
/**
* Get the current supplementary group IDs for the specified request
*
* Similar to the getgroups(2) system call, except the return value is
* always the total number of group IDs, even if it is larger than the
* specified size.
*
* The current fuse kernel module in linux (as of 2.6.30) doesn't pass
* the group list to userspace, hence this function needs to parse
* "/proc/$TID/task/$TID/status" to get the group IDs.
*
* This feature may not be supported on all operating systems. In
* such a case this function will return -ENOSYS.
*
* @param req request handle
* @param size size of given array
* @param list array of group IDs to be filled in
* @return the total number of supplementary group IDs or -errno on failure
*/
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[]);
/**
* Callback function for an interrupt
*
* @param req interrupted request
* @param data user data
*/
typedef void (*fuse_interrupt_func_t)(fuse_req_t req, void *data);
/**
* Register/unregister callback for an interrupt
*
* If an interrupt has already happened, then the callback function is
* called from within this function, hence it's not possible for
* interrupts to be lost.
*
* @param req request handle
* @param func the callback function or NULL for unregister
* @param data user data passed to the callback function
*/
void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
void *data);
/**
* Check if a request has already been interrupted
*
* @param req request handle
* @return 1 if the request has been interrupted, 0 otherwise
*/
int fuse_req_interrupted(fuse_req_t req);
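/*
 * Illustrative sketch: bailing out of a long-running request once it has
 * been interrupted.  fs_more_work_to_do() is an assumption of this sketch.
 *
 *   static void slow_op(fuse_req_t req)
 *   {
 *       while (fs_more_work_to_do()) {
 *           if (fuse_req_interrupted(req)) {
 *               fuse_reply_err(req, EINTR);
 *               return;
 *           }
 *       }
 *       fuse_reply_err(req, 0);
 *   }
 *
 * For handlers that block instead of polling, fuse_req_interrupt_func() can
 * register a callback that wakes the blocked worker so it can send the EINTR
 * reply itself.
 */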
/* ----------------------------------------------------------- *
* Filesystem setup *
* ----------------------------------------------------------- */
/**
* Create a low level session
*
* @param args argument vector
* @param op the low level filesystem operations
* @param op_size sizeof(struct fuse_lowlevel_ops)
* @param userdata user data
* @return the created session object, or NULL on failure
*
* Example: See hello_ll.c:
* \snippet hello_ll.c doxygen_fuse_lowlevel_usage
*/
struct fuse_session *fuse_lowlevel_new(struct fuse_args *args,
const struct fuse_lowlevel_ops *op,
size_t op_size, void *userdata);
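/*
 * Illustrative sketch of the usual setup/teardown sequence, mirroring the
 * hello_ll example referenced above.  my_oper is an assumed
 * struct fuse_lowlevel_ops; error handling is trimmed.
 *
 *   struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
 *   struct fuse_chan *ch;
 *   char *mountpoint;
 *   if (fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) != -1 &&
 *       (ch = fuse_mount(mountpoint, &args)) != NULL) {
 *       struct fuse_session *se = fuse_lowlevel_new(&args, &my_oper,
 *                                                   sizeof(my_oper), NULL);
 *       if (se && fuse_set_signal_handlers(se) != -1) {
 *           fuse_session_add_chan(se, ch);
 *           fuse_session_loop(se);
 *           fuse_remove_signal_handlers(se);
 *           fuse_session_remove_chan(ch);
 *       }
 *       if (se)
 *           fuse_session_destroy(se);
 *       fuse_unmount(mountpoint, ch);
 *       free(mountpoint);
 *   }
 *   fuse_opt_free_args(&args);
 */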
/* ----------------------------------------------------------- *
* Session interface *
* ----------------------------------------------------------- */
/**
* Assign a channel to a session
*
* If a session is destroyed, the assigned channel is also destroyed
*
* @param se the session
* @param ch the channel
*/
void fuse_session_add_chan(struct fuse_session *se, struct fuse_chan *ch);
/**
* Remove the channel from a session
*
* If the channel is not assigned to a session, then this is a no-op
*
* @param ch the channel to remove
*/
void fuse_session_remove_chan(struct fuse_chan *ch);
/**
* Return channel assigned to the session
*
* @param se the session
* @return the channel
*/
struct fuse_chan *fuse_session_chan(struct fuse_session *se);
/**
* Process a raw request supplied in a generic buffer
*
* The fuse_buf may contain a memory buffer or a pipe file descriptor.
*
* @param se the session
* @param buf the fuse_buf containing the request
* @param ch channel on which the request was received
*/
void fuse_session_process_buf(struct fuse_session *se,
const struct fuse_buf *buf, struct fuse_chan *ch);
/**
* Receive a raw request supplied in a generic buffer
*
* The fuse_buf supplied to this function contains a suitably allocated memory
* buffer. This may be overwritten with a file descriptor buffer.
*
* @param se the session
* @param buf the fuse_buf to store the request in
* @param ch the channel
* @return the actual size of the raw request, or -errno on error
*/
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf,
struct fuse_chan *ch);
/**
* Destroy a session
*
* @param se the session
*/
void fuse_session_destroy(struct fuse_session *se);
/**
* Exit a session.
*
* This function is invoked by the POSIX signal handlers, when registered using:
* * fuse_set_signal_handlers()
*
* @param se the session
*/
void fuse_session_exit(struct fuse_session *se);
/**
* Reset the exited status of a session
*
* @param se the session
*/
void fuse_session_reset(struct fuse_session *se);
/**
* Query the exited status of a session
*
* @param se the session
* @return 1 if exited, 0 if not exited
*/
int fuse_session_exited(struct fuse_session *se);
/**
* Enter a single threaded, blocking event loop.
*
 * Using POSIX signals this event loop can be exited, but the session
 * needs to be configured by calling fuse_set_signal_handlers() first.
*
* @param se the session
* @return 0 on success, -1 on error
*/
int fuse_session_loop(struct fuse_session *se);
/**
* Enter a multi-threaded event loop
*
* @param se the session
* @return 0 on success, -1 on error
*/
int fuse_session_loop_mt(struct fuse_session *se);
/* ----------------------------------------------------------- *
* Channel interface *
* ----------------------------------------------------------- */
/**
* Query the file descriptor of the channel
*
* @param ch the channel
* @return the file descriptor passed to fuse_chan_new()
*/
int fuse_chan_fd(struct fuse_chan *ch);
/**
* Destroy a channel
*
* @param ch the channel
*/
void fuse_chan_destroy(struct fuse_chan *ch);
#ifdef __cplusplus
}
#endif
#endif /* _FUSE_LOWLEVEL_H_ */
| gpl-2.0 |
bellinat0r/Epiar | Build/cmake/ThirdParty/MIKMOD/mikmod_build.h | 23879 | /* MikMod sound library
(c) 1998, 1999, 2000 Miodrag Vallat and others - see file AUTHORS
for complete list.
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.
*/
/*==============================================================================
$Id: mikmod_build.h,v 1.1.1.1 2004/06/01 12:16:17 raph Exp $
MikMod sound library include file
==============================================================================*/
#ifndef _MIKMOD_H_
#define _MIKMOD_H_
#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* ========== Compiler magic for shared libraries
*/
//#if defined WIN32 && defined _DLL
//#ifdef DLL_EXPORTS
//#define MIKMODAPI __declspec(dllexport)
//#else
//#define MIKMODAPI __declspec(dllimport)
//#endif
//#else
#define MIKMODAPI
//#endif
/*
* ========== Library version
*/
#define LIBMIKMOD_VERSION_MAJOR 3L
#define LIBMIKMOD_VERSION_MINOR 1L
#define LIBMIKMOD_REVISION 10L
#define LIBMIKMOD_VERSION \
((LIBMIKMOD_VERSION_MAJOR<<16)| \
(LIBMIKMOD_VERSION_MINOR<< 8)| \
(LIBMIKMOD_REVISION))
MIKMODAPI extern long MikMod_GetVersion(void);
/*
* ========== Platform independent-type definitions
*/
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <io.h>
#include <mmsystem.h>
#endif
#if defined(__OS2__)||defined(__EMX__)
#define INCL_DOSSEMAPHORES
#include <os2.h>
#else
typedef char CHAR;
#endif
#if defined(__arch64__) || defined(__alpha)
/* 64 bit architectures */
typedef signed char SBYTE; /* 1 byte, signed */
typedef unsigned char UBYTE; /* 1 byte, unsigned */
typedef signed short SWORD; /* 2 bytes, signed */
typedef unsigned short UWORD; /* 2 bytes, unsigned */
typedef signed int SLONG; /* 4 bytes, signed */
typedef unsigned int ULONG; /* 4 bytes, unsigned */
typedef int BOOL; /* 0=false, <>0 true */
#else
/* 32 bit architectures */
typedef signed char SBYTE; /* 1 byte, signed */
typedef unsigned char UBYTE; /* 1 byte, unsigned */
typedef signed short SWORD; /* 2 bytes, signed */
typedef unsigned short UWORD; /* 2 bytes, unsigned */
typedef signed long SLONG; /* 4 bytes, signed */
#if !defined(__OS2__)&&!defined(__EMX__)&&!defined(WIN32)
typedef unsigned long ULONG; /* 4 bytes, unsigned */
typedef int BOOL; /* 0=false, <>0 true */
#endif
#endif
/*
* ========== Error codes
*/
enum {
MMERR_OPENING_FILE = 1,
MMERR_OUT_OF_MEMORY,
MMERR_DYNAMIC_LINKING,
MMERR_SAMPLE_TOO_BIG,
MMERR_OUT_OF_HANDLES,
MMERR_UNKNOWN_WAVE_TYPE,
MMERR_LOADING_PATTERN,
MMERR_LOADING_TRACK,
MMERR_LOADING_HEADER,
MMERR_LOADING_SAMPLEINFO,
MMERR_NOT_A_MODULE,
MMERR_NOT_A_STREAM,
MMERR_MED_SYNTHSAMPLES,
MMERR_ITPACK_INVALID_DATA,
MMERR_DETECTING_DEVICE,
MMERR_INVALID_DEVICE,
MMERR_INITIALIZING_MIXER,
MMERR_OPENING_AUDIO,
MMERR_8BIT_ONLY,
MMERR_16BIT_ONLY,
MMERR_STEREO_ONLY,
MMERR_ULAW,
MMERR_NON_BLOCK,
MMERR_AF_AUDIO_PORT,
MMERR_AIX_CONFIG_INIT,
MMERR_AIX_CONFIG_CONTROL,
MMERR_AIX_CONFIG_START,
MMERR_GUS_SETTINGS,
MMERR_GUS_RESET,
MMERR_GUS_TIMER,
MMERR_HP_SETSAMPLESIZE,
MMERR_HP_SETSPEED,
MMERR_HP_CHANNELS,
MMERR_HP_AUDIO_OUTPUT,
MMERR_HP_AUDIO_DESC,
MMERR_HP_BUFFERSIZE,
MMERR_OSS_SETFRAGMENT,
MMERR_OSS_SETSAMPLESIZE,
MMERR_OSS_SETSTEREO,
MMERR_OSS_SETSPEED,
MMERR_SGI_SPEED,
MMERR_SGI_16BIT,
MMERR_SGI_8BIT,
MMERR_SGI_STEREO,
MMERR_SGI_MONO,
MMERR_SUN_INIT,
MMERR_OS2_MIXSETUP,
MMERR_OS2_SEMAPHORE,
MMERR_OS2_TIMER,
MMERR_OS2_THREAD,
MMERR_DS_PRIORITY,
MMERR_DS_BUFFER,
MMERR_DS_FORMAT,
MMERR_DS_NOTIFY,
MMERR_DS_EVENT,
MMERR_DS_THREAD,
MMERR_DS_UPDATE,
MMERR_WINMM_HANDLE,
MMERR_WINMM_ALLOCATED,
MMERR_WINMM_DEVICEID,
MMERR_WINMM_FORMAT,
MMERR_WINMM_UNKNOWN,
MMERR_MAC_SPEED,
MMERR_MAC_START,
MMERR_MAX
};
/*
* ========== Error handling
*/
typedef void (MikMod_handler)(void);
typedef MikMod_handler *MikMod_handler_t;
MIKMODAPI extern int MikMod_errno;
MIKMODAPI extern BOOL MikMod_critical;
MIKMODAPI extern char *MikMod_strerror(int);
MIKMODAPI extern MikMod_handler_t MikMod_RegisterErrorHandler(MikMod_handler_t);
/*
* ========== Library initialization and core functions
*/
struct MDRIVER;
MIKMODAPI extern void MikMod_RegisterAllDrivers(void);
MIKMODAPI extern CHAR* MikMod_InfoDriver(void);
MIKMODAPI extern void MikMod_RegisterDriver(struct MDRIVER*);
MIKMODAPI extern int MikMod_DriverFromAlias(CHAR*);
MIKMODAPI extern BOOL MikMod_Init(CHAR*);
MIKMODAPI extern void MikMod_Exit(void);
MIKMODAPI extern BOOL MikMod_Reset(CHAR*);
MIKMODAPI extern BOOL MikMod_SetNumVoices(int,int);
MIKMODAPI extern BOOL MikMod_Active(void);
MIKMODAPI extern BOOL MikMod_EnableOutput(void);
MIKMODAPI extern void MikMod_DisableOutput(void);
MIKMODAPI extern void MikMod_Update(void);
MIKMODAPI extern BOOL MikMod_InitThreads(void);
MIKMODAPI extern void MikMod_Lock(void);
MIKMODAPI extern void MikMod_Unlock(void);
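/*
 * Illustrative sketch: typical library setup and teardown.  Driver and
 * loader registration must happen before MikMod_Init(); the empty string
 * passes no driver-specific options.
 *
 *   MikMod_RegisterAllDrivers();
 *   MikMod_RegisterAllLoaders();
 *   md_mode |= DMODE_SOFT_MUSIC | DMODE_SOFT_SNDFX;
 *   if (MikMod_Init("")) {
 *       fprintf(stderr, "Could not initialize sound: %s\n",
 *               MikMod_strerror(MikMod_errno));
 *       return 1;
 *   }
 *   // ... load and play modules or samples here ...
 *   MikMod_Exit();
 */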
/*
* ========== Reader, Writer
*/
typedef struct MREADER {
BOOL (*Seek)(struct MREADER*,long,int);
long (*Tell)(struct MREADER*);
BOOL (*Read)(struct MREADER*,void*,size_t);
int (*Get)(struct MREADER*);
BOOL (*Eof)(struct MREADER*);
} MREADER;
typedef struct MWRITER {
BOOL (*Seek)(struct MWRITER*,long,int);
long (*Tell)(struct MWRITER*);
BOOL (*Write)(struct MWRITER*,void*,size_t);
BOOL (*Put)(struct MWRITER*,int);
} MWRITER;
/*
* ========== Samples
*/
/* Sample playback should not be interrupted */
#define SFX_CRITICAL 1
/* Sample format [loading and in-memory] flags: */
#define SF_16BITS 0x0001
#define SF_STEREO 0x0002
#define SF_SIGNED 0x0004
#define SF_BIG_ENDIAN 0x0008
#define SF_DELTA 0x0010
#define SF_ITPACKED 0x0020
#define SF_FORMATMASK 0x003F
/* General Playback flags */
#define SF_LOOP 0x0100
#define SF_BIDI 0x0200
#define SF_REVERSE 0x0400
#define SF_SUSTAIN 0x0800
#define SF_PLAYBACKMASK 0x0C00
/* Module-only Playback Flags */
#define SF_OWNPAN 0x1000
#define SF_UST_LOOP 0x2000
#define SF_EXTRAPLAYBACKMASK 0x3000
/* Panning constants */
#define PAN_LEFT 0
#define PAN_HALFLEFT 64
#define PAN_CENTER 128
#define PAN_HALFRIGHT 192
#define PAN_RIGHT 255
#define PAN_SURROUND 512 /* panning value for Dolby Surround */
typedef struct SAMPLE {
SWORD panning; /* panning (0-255 or PAN_SURROUND) */
ULONG speed; /* Base playing speed/frequency of note */
UBYTE volume; /* volume 0-64 */
UWORD inflags; /* sample format on disk */
UWORD flags; /* sample format in memory */
ULONG length; /* length of sample (in samples!) */
ULONG loopstart; /* repeat position (relative to start, in samples) */
ULONG loopend; /* repeat end */
ULONG susbegin; /* sustain loop begin (in samples) \ Not Supported */
ULONG susend; /* sustain loop end / Yet! */
/* Variables used by the module player only! (ignored for sound effects) */
UBYTE globvol; /* global volume */
UBYTE vibflags; /* autovibrato flag stuffs */
UBYTE vibtype; /* Vibratos moved from INSTRUMENT to SAMPLE */
UBYTE vibsweep;
UBYTE vibdepth;
UBYTE vibrate;
CHAR* samplename; /* name of the sample */
/* Values used internally only */
UWORD avibpos; /* autovibrato pos [player use] */
UBYTE divfactor; /* for sample scaling, maintains proper period slides */
ULONG seekpos; /* seek position in file */
SWORD handle; /* sample handle used by individual drivers */
} SAMPLE;
/* Sample functions */
MIKMODAPI extern SAMPLE *Sample_Load(CHAR*);
MIKMODAPI extern SAMPLE *Sample_LoadFP(FILE*);
MIKMODAPI extern SAMPLE *Sample_LoadGeneric(MREADER*);
MIKMODAPI extern void Sample_Free(SAMPLE*);
MIKMODAPI extern SBYTE Sample_Play(SAMPLE*,ULONG,UBYTE);
MIKMODAPI extern void Voice_SetVolume(SBYTE,UWORD);
MIKMODAPI extern UWORD Voice_GetVolume(SBYTE);
MIKMODAPI extern void Voice_SetFrequency(SBYTE,ULONG);
MIKMODAPI extern ULONG Voice_GetFrequency(SBYTE);
MIKMODAPI extern void Voice_SetPanning(SBYTE,ULONG);
MIKMODAPI extern ULONG Voice_GetPanning(SBYTE);
MIKMODAPI extern void Voice_Play(SBYTE,SAMPLE*,ULONG);
MIKMODAPI extern void Voice_Stop(SBYTE);
MIKMODAPI extern BOOL Voice_Stopped(SBYTE);
MIKMODAPI extern SLONG Voice_GetPosition(SBYTE);
MIKMODAPI extern ULONG Voice_RealVolume(SBYTE);
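/*
 * Illustrative sketch: one-shot sound effect playback, assuming the library
 * has already been initialised as sketched above.  The usleep() call is
 * platform specific and only keeps the loop from spinning.
 *
 *   SAMPLE *sfx = Sample_Load("shot.wav");
 *   if (sfx) {
 *       MikMod_SetNumVoices(-1, 4);   // keep music voices, allow 4 sfx voices
 *       MikMod_EnableOutput();
 *       SBYTE voice = Sample_Play(sfx, 0, 0);
 *       while (!Voice_Stopped(voice)) {
 *           MikMod_Update();
 *           usleep(10000);
 *       }
 *       MikMod_DisableOutput();
 *       Sample_Free(sfx);
 *   }
 */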
/*
* ========== Internal module representation (UniMod)
*/
/*
Instrument definition - for information only, the only field which may be
of use in user programs is the name field
*/
/* Instrument note count */
#define INSTNOTES 120
/* Envelope point */
typedef struct ENVPT {
SWORD pos;
SWORD val;
} ENVPT;
/* Envelope point count */
#define ENVPOINTS 32
/* Instrument structure */
typedef struct INSTRUMENT {
CHAR* insname;
UBYTE flags;
UWORD samplenumber[INSTNOTES];
UBYTE samplenote[INSTNOTES];
UBYTE nnatype;
UBYTE dca; /* duplicate check action */
UBYTE dct; /* duplicate check type */
UBYTE globvol;
UWORD volfade;
SWORD panning; /* instrument-based panning var */
UBYTE pitpansep; /* pitch pan separation (0 to 255) */
UBYTE pitpancenter; /* pitch pan center (0 to 119) */
	UBYTE rvolvar;     /* random volume variations (0 - 100%) */
	UBYTE rpanvar;     /* random panning variations (0 - 100%) */
/* volume envelope */
UBYTE volflg; /* bit 0: on 1: sustain 2: loop */
UBYTE volpts;
UBYTE volsusbeg;
UBYTE volsusend;
UBYTE volbeg;
UBYTE volend;
ENVPT volenv[ENVPOINTS];
/* panning envelope */
UBYTE panflg; /* bit 0: on 1: sustain 2: loop */
UBYTE panpts;
UBYTE pansusbeg;
UBYTE pansusend;
UBYTE panbeg;
UBYTE panend;
ENVPT panenv[ENVPOINTS];
/* pitch envelope */
UBYTE pitflg; /* bit 0: on 1: sustain 2: loop */
UBYTE pitpts;
UBYTE pitsusbeg;
UBYTE pitsusend;
UBYTE pitbeg;
UBYTE pitend;
ENVPT pitenv[ENVPOINTS];
} INSTRUMENT;
struct MP_CONTROL;
struct MP_VOICE;
/*
Module definition
*/
/* maximum master channels supported */
#define UF_MAXCHAN 64
/* Module flags */
#define UF_XMPERIODS 0x0001 /* XM periods / finetuning */
#define UF_LINEAR 0x0002 /* LINEAR periods (UF_XMPERIODS must be set) */
#define UF_INST 0x0004 /* Instruments are used */
#define UF_NNA 0x0008 /* IT: NNA used, set numvoices rather
than numchn */
#define UF_S3MSLIDES 0x0010 /* uses old S3M volume slides */
#define UF_BGSLIDES 0x0020 /* continue volume slides in the background */
#define UF_HIGHBPM 0x0040 /* MED: can use >255 bpm */
#define UF_NOWRAP 0x0080 /* XM-type (i.e. illogical) pattern break
semantics */
#define UF_ARPMEM 0x0100 /* IT: need arpeggio memory */
#define UF_FT2QUIRKS 0x0200 /* emulate some FT2 replay quirks */
#define UF_PANNING 0x0400 /* module uses panning effects or have
non-tracker default initial panning */
typedef struct MODULE {
/* general module information */
CHAR* songname; /* name of the song */
CHAR* modtype; /* string type of module loaded */
CHAR* comment; /* module comments */
UWORD flags; /* See module flags above */
UBYTE numchn; /* number of module channels */
UBYTE numvoices; /* max # voices used for full NNA playback */
UWORD numpos; /* number of positions in this song */
UWORD numpat; /* number of patterns in this song */
UWORD numins; /* number of instruments */
UWORD numsmp; /* number of samples */
struct INSTRUMENT* instruments; /* all instruments */
struct SAMPLE* samples; /* all samples */
UBYTE realchn; /* real number of channels used */
UBYTE totalchn; /* total number of channels used (incl NNAs) */
/* playback settings */
UWORD reppos; /* restart position */
UBYTE initspeed; /* initial song speed */
UWORD inittempo; /* initial song tempo */
UBYTE initvolume; /* initial global volume (0 - 128) */
UWORD panning[UF_MAXCHAN]; /* panning positions */
UBYTE chanvol[UF_MAXCHAN]; /* channel positions */
UWORD bpm; /* current beats-per-minute speed */
UWORD sngspd; /* current song speed */
SWORD volume; /* song volume (0-128) (or user volume) */
BOOL extspd; /* extended speed flag (default enabled) */
BOOL panflag; /* panning flag (default enabled) */
BOOL wrap; /* wrap module ? (default disabled) */
BOOL loop; /* allow module to loop ? (default enabled) */
BOOL fadeout; /* volume fade out during last pattern */
UWORD patpos; /* current row number */
SWORD sngpos; /* current song position */
ULONG sngtime; /* current song time in 2^-10 seconds */
SWORD relspd; /* relative speed factor */
/* internal module representation */
UWORD numtrk; /* number of tracks */
UBYTE** tracks; /* array of numtrk pointers to tracks */
UWORD* patterns; /* array of Patterns */
UWORD* pattrows; /* array of number of rows for each pattern */
UWORD* positions; /* all positions */
BOOL forbid; /* if true, no player update! */
UWORD numrow; /* number of rows on current pattern */
UWORD vbtick; /* tick counter (counts from 0 to sngspd) */
UWORD sngremainder;/* used for song time computation */
struct MP_CONTROL* control; /* Effects Channel info (size pf->numchn) */
struct MP_VOICE* voice; /* Audio Voice information (size md_numchn) */
UBYTE globalslide; /* global volume slide rate */
UBYTE pat_repcrazy;/* module has just looped to position -1 */
UWORD patbrk; /* position where to start a new pattern */
UBYTE patdly; /* patterndelay counter (command memory) */
UBYTE patdly2; /* patterndelay counter (real one) */
SWORD posjmp; /* flag to indicate a jump is needed... */
UWORD bpmlimit; /* threshold to detect bpm or speed values */
} MODULE;
/*
* ========== Module loaders
*/
struct MLOADER;
MIKMODAPI extern CHAR* MikMod_InfoLoader(void);
MIKMODAPI extern void MikMod_RegisterAllLoaders(void);
MIKMODAPI extern void MikMod_RegisterLoader(struct MLOADER*);
MIKMODAPI extern struct MLOADER load_669; /* 669 and Extended-669 (by Tran/Renaissance) */
MIKMODAPI extern struct MLOADER load_amf; /* DMP Advanced Module Format (by Otto Chrons) */
MIKMODAPI extern struct MLOADER load_dsm; /* DSIK internal module format */
MIKMODAPI extern struct MLOADER load_far; /* Farandole Composer (by Daniel Potter) */
MIKMODAPI extern struct MLOADER load_gdm; /* General DigiMusic (by Edward Schlunder) */
MIKMODAPI extern struct MLOADER load_it; /* Impulse Tracker (by Jeffrey Lim) */
MIKMODAPI extern struct MLOADER load_imf; /* Imago Orpheus (by Lutz Roeder) */
MIKMODAPI extern struct MLOADER load_med; /* Amiga MED modules (by Teijo Kinnunen) */
MIKMODAPI extern struct MLOADER load_m15; /* Soundtracker 15-instrument */
MIKMODAPI extern struct MLOADER load_mod; /* Standard 31-instrument Module loader */
MIKMODAPI extern struct MLOADER load_mtm; /* Multi-Tracker Module (by Renaissance) */
MIKMODAPI extern struct MLOADER load_okt; /* Amiga Oktalyzer */
MIKMODAPI extern struct MLOADER load_stm; /* ScreamTracker 2 (by Future Crew) */
MIKMODAPI extern struct MLOADER load_stx; /* STMIK 0.2 (by Future Crew) */
MIKMODAPI extern struct MLOADER load_s3m; /* ScreamTracker 3 (by Future Crew) */
MIKMODAPI extern struct MLOADER load_ult; /* UltraTracker (by MAS) */
MIKMODAPI extern struct MLOADER load_uni; /* MikMod and APlayer internal module format */
MIKMODAPI extern struct MLOADER load_xm; /* FastTracker 2 (by Triton) */
/*
* ========== Module player
*/
MIKMODAPI extern MODULE* Player_Load(CHAR*,int,BOOL);
MIKMODAPI extern MODULE* Player_LoadFP(FILE*,int,BOOL);
MIKMODAPI extern MODULE* Player_LoadGeneric(MREADER*,int,BOOL);
MIKMODAPI extern CHAR* Player_LoadTitle(CHAR*);
MIKMODAPI extern CHAR* Player_LoadTitleFP(FILE*);
MIKMODAPI extern void Player_Free(MODULE*);
MIKMODAPI extern void Player_Start(MODULE*);
MIKMODAPI extern BOOL Player_Active(void);
MIKMODAPI extern void Player_Stop(void);
MIKMODAPI extern void Player_TogglePause(void);
MIKMODAPI extern BOOL Player_Paused(void);
MIKMODAPI extern void Player_NextPosition(void);
MIKMODAPI extern void Player_PrevPosition(void);
MIKMODAPI extern void Player_SetPosition(UWORD);
MIKMODAPI extern BOOL Player_Muted(UBYTE);
MIKMODAPI extern void Player_SetVolume(SWORD);
MIKMODAPI extern MODULE* Player_GetModule(void);
MIKMODAPI extern void Player_SetSpeed(UWORD);
MIKMODAPI extern void Player_SetTempo(UWORD);
MIKMODAPI extern void Player_Unmute(SLONG,...);
MIKMODAPI extern void Player_Mute(SLONG,...);
MIKMODAPI extern void Player_ToggleMute(SLONG,...);
MIKMODAPI extern int Player_GetChannelVoice(UBYTE);
MIKMODAPI extern UWORD Player_GetChannelPeriod(UBYTE);
typedef void (MikMod_player)(void);
typedef MikMod_player *MikMod_player_t;
MIKMODAPI extern MikMod_player_t MikMod_RegisterPlayer(MikMod_player_t);
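/*
 * Illustrative sketch: loading and playing a module with up to 64 voices and
 * the "curious" flag off.  The usleep() call is platform specific.
 *
 *   MODULE *module = Player_Load("tune.xm", 64, 0);
 *   if (module) {
 *       Player_Start(module);
 *       while (Player_Active()) {
 *           MikMod_Update();
 *           usleep(10000);
 *       }
 *       Player_Stop();
 *       Player_Free(module);
 *   }
 */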
#define MUTE_EXCLUSIVE 32000
#define MUTE_INCLUSIVE 32001
/*
* ========== Drivers
*/
enum {
MD_MUSIC = 0,
MD_SNDFX
};
enum {
MD_HARDWARE = 0,
MD_SOFTWARE
};
/* Mixing flags */
/* These ones take effect only after MikMod_Init or MikMod_Reset */
#define DMODE_16BITS 0x0001 /* enable 16 bit output */
#define DMODE_STEREO 0x0002 /* enable stereo output */
#define DMODE_SOFT_SNDFX 0x0004 /* Process sound effects via software mixer */
#define DMODE_SOFT_MUSIC 0x0008 /* Process music via software mixer */
#define DMODE_HQMIXER 0x0010 /* Use high-quality (slower) software mixer */
/* These take effect immediately. */
#define DMODE_SURROUND 0x0100 /* enable surround sound */
#define DMODE_INTERP 0x0200 /* enable interpolation */
#define DMODE_REVERSE 0x0400 /* reverse stereo */
struct SAMPLOAD;
typedef struct MDRIVER {
struct MDRIVER* next;
CHAR* Name;
CHAR* Version;
UBYTE HardVoiceLimit; /* Limit of hardware mixer voices */
UBYTE SoftVoiceLimit; /* Limit of software mixer voices */
CHAR* Alias;
void (*CommandLine) (CHAR*);
BOOL (*IsPresent) (void);
SWORD (*SampleLoad) (struct SAMPLOAD*,int);
void (*SampleUnload) (SWORD);
ULONG (*FreeSampleSpace) (int);
ULONG (*RealSampleLength) (int,struct SAMPLE*);
BOOL (*Init) (void);
void (*Exit) (void);
BOOL (*Reset) (void);
BOOL (*SetNumVoices) (void);
BOOL (*PlayStart) (void);
void (*PlayStop) (void);
void (*Update) (void);
void (*Pause) (void);
void (*VoiceSetVolume) (UBYTE,UWORD);
UWORD (*VoiceGetVolume) (UBYTE);
void (*VoiceSetFrequency)(UBYTE,ULONG);
ULONG (*VoiceGetFrequency)(UBYTE);
void (*VoiceSetPanning) (UBYTE,ULONG);
ULONG (*VoiceGetPanning) (UBYTE);
void (*VoicePlay) (UBYTE,SWORD,ULONG,ULONG,ULONG,ULONG,UWORD);
void (*VoiceStop) (UBYTE);
BOOL (*VoiceStopped) (UBYTE);
SLONG (*VoiceGetPosition) (UBYTE);
ULONG (*VoiceRealVolume) (UBYTE);
} MDRIVER;
/* These variables can be changed at ANY time and results will be immediate */
MIKMODAPI extern UBYTE md_volume; /* global sound volume (0-128) */
MIKMODAPI extern UBYTE md_musicvolume; /* volume of song */
MIKMODAPI extern UBYTE md_sndfxvolume; /* volume of sound effects */
MIKMODAPI extern UBYTE md_reverb; /* 0 = none; 15 = chaos */
MIKMODAPI extern UBYTE md_pansep; /* 0 = mono; 128 == 100% (full left/right) */
/* The variables below can be changed at any time, but changes will not be
implemented until MikMod_Reset is called. A call to MikMod_Reset may result
in a skip or pop in audio (depending on the soundcard driver and the settings
changed). */
MIKMODAPI extern UWORD md_device; /* device */
MIKMODAPI extern UWORD md_mixfreq; /* mixing frequency */
MIKMODAPI extern UWORD md_mode; /* mode. See DMODE_? flags above */
/* The following variable should not be changed! */
MIKMODAPI extern MDRIVER* md_driver; /* Current driver in use. */
/* Known drivers list */
MIKMODAPI extern struct MDRIVER drv_nos; /* no sound */
MIKMODAPI extern struct MDRIVER drv_pipe; /* piped output */
MIKMODAPI extern struct MDRIVER drv_raw; /* raw file disk writer [music.raw] */
MIKMODAPI extern struct MDRIVER drv_stdout; /* output to stdout */
MIKMODAPI extern struct MDRIVER drv_wav; /* RIFF WAVE file disk writer [music.wav] */
MIKMODAPI extern struct MDRIVER drv_ultra; /* Linux Ultrasound driver */
MIKMODAPI extern struct MDRIVER drv_sam9407; /* Linux sam9407 driver */
MIKMODAPI extern struct MDRIVER drv_AF; /* Dec Alpha AudioFile */
MIKMODAPI extern struct MDRIVER drv_aix; /* AIX audio device */
MIKMODAPI extern struct MDRIVER drv_alsa; /* Advanced Linux Sound Architecture (ALSA) */
MIKMODAPI extern struct MDRIVER drv_esd; /* Enlightened sound daemon (EsounD) */
MIKMODAPI extern struct MDRIVER drv_hp; /* HP-UX audio device */
MIKMODAPI extern struct MDRIVER drv_oss; /* OpenSound System (Linux,FreeBSD...) */
MIKMODAPI extern struct MDRIVER drv_sgi; /* SGI audio library */
MIKMODAPI extern struct MDRIVER drv_sun; /* Sun/NetBSD/OpenBSD audio device */
MIKMODAPI extern struct MDRIVER drv_dart; /* OS/2 Direct Audio RealTime */
MIKMODAPI extern struct MDRIVER drv_os2; /* OS/2 MMPM/2 */
MIKMODAPI extern struct MDRIVER drv_ds; /* Win32 DirectSound driver */
MIKMODAPI extern struct MDRIVER drv_win; /* Win32 multimedia API driver */
MIKMODAPI extern struct MDRIVER drv_mac; /* Macintosh Sound Manager driver */
/*========== Virtual channel mixer interface (for user-supplied drivers only) */
MIKMODAPI extern BOOL VC_Init(void);
MIKMODAPI extern void VC_Exit(void);
MIKMODAPI extern BOOL VC_SetNumVoices(void);
MIKMODAPI extern ULONG VC_SampleSpace(int);
MIKMODAPI extern ULONG VC_SampleLength(int,SAMPLE*);
MIKMODAPI extern BOOL VC_PlayStart(void);
MIKMODAPI extern void VC_PlayStop(void);
MIKMODAPI extern SWORD VC_SampleLoad(struct SAMPLOAD*,int);
MIKMODAPI extern void VC_SampleUnload(SWORD);
MIKMODAPI extern ULONG VC_WriteBytes(SBYTE*,ULONG);
MIKMODAPI extern ULONG VC_SilenceBytes(SBYTE*,ULONG);
MIKMODAPI extern void VC_VoiceSetVolume(UBYTE,UWORD);
MIKMODAPI extern UWORD VC_VoiceGetVolume(UBYTE);
MIKMODAPI extern void VC_VoiceSetFrequency(UBYTE,ULONG);
MIKMODAPI extern ULONG VC_VoiceGetFrequency(UBYTE);
MIKMODAPI extern void VC_VoiceSetPanning(UBYTE,ULONG);
MIKMODAPI extern ULONG VC_VoiceGetPanning(UBYTE);
MIKMODAPI extern void VC_VoicePlay(UBYTE,SWORD,ULONG,ULONG,ULONG,ULONG,UWORD);
MIKMODAPI extern void VC_VoiceStop(UBYTE);
MIKMODAPI extern BOOL VC_VoiceStopped(UBYTE);
MIKMODAPI extern SLONG VC_VoiceGetPosition(UBYTE);
MIKMODAPI extern ULONG VC_VoiceRealVolume(UBYTE);
#ifdef __cplusplus
}
#endif
#endif
/* ex:set ts=4: */
| gpl-2.0 |
GeoCat/QGIS | python/plugins/processing/algs/qgis/Rasterize.py | 10479 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Rasterize.py
-------------------
begin : 2016-10-05
copyright : (C) 2016 by OPENGIS.ch
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from qgis.PyQt.QtGui import QImage, QPainter
from qgis.PyQt.QtCore import QSize
from qgis.core import (
QgsMapSettings,
QgsMapRendererCustomPainterJob,
QgsRectangle,
QgsProject,
QgsProcessingException,
QgsProcessingParameterExtent,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterMapLayer,
QgsProcessingParameterRasterDestination,
QgsRasterFileWriter
)
import qgis
import osgeo.gdal
import os
import tempfile
import math
__author__ = 'Matthias Kuhn'
__date__ = '2016-10-05'
__copyright__ = '(C) 2016 by OPENGIS.ch'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class RasterizeAlgorithm(QgisAlgorithm):
"""Processing algorithm renders map canvas to a raster file.
It's possible to choose the following parameters:
- Map theme to render
- Layer to render
- The minimum extent to render
- The tile size
- Map unit per pixel
- The output (can be saved to a file or to a temporary file and
automatically opened as layer in qgis)
"""
# Constants used to refer to parameters and outputs. They will be
# used when calling the algorithm from another algorithm, or when
# calling from the QGIS console.
OUTPUT = 'OUTPUT'
MAP_THEME = 'MAP_THEME'
LAYER = 'LAYER'
EXTENT = 'EXTENT'
TILE_SIZE = 'TILE_SIZE'
MAP_UNITS_PER_PIXEL = 'MAP_UNITS_PER_PIXEL'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
"""Here we define the inputs and output of the algorithm, along
with some other properties.
"""
# The parameters
self.addParameter(
QgsProcessingParameterExtent(self.EXTENT, description=self.tr(
'Minimum extent to render')))
self.addParameter(
QgsProcessingParameterNumber(
self.TILE_SIZE,
self.tr('Tile size'),
defaultValue=1024, minValue=64))
self.addParameter(QgsProcessingParameterNumber(
self.MAP_UNITS_PER_PIXEL,
self.tr(
'Map units per '
'pixel'),
defaultValue=100,
minValue=0,
type=QgsProcessingParameterNumber.Double
))
map_theme_param = QgsProcessingParameterString(
self.MAP_THEME,
description=self.tr(
'Map theme to render'),
defaultValue=None, optional=True)
map_theme_param.setMetadata(
{'widget_wrapper': {
'class':
'processing.gui.wrappers_map_theme.MapThemeWrapper'}})
self.addParameter(map_theme_param)
self.addParameter(
QgsProcessingParameterMapLayer(
self.LAYER,
description=self.tr(
'Single layer to render'),
optional=True))
# We add a raster layer as output
self.addParameter(QgsProcessingParameterRasterDestination(
self.OUTPUT,
self.tr(
'Output layer')))
def name(self):
# Unique (non-user visible) name of algorithm
return 'rasterize'
def displayName(self):
# The name that the user will see in the toolbox
return self.tr('Convert map to raster')
def group(self):
return self.tr('Raster tools')
def tags(self):
return self.tr('layer,raster,convert,file,map themes,tiles,render').split(',')
# def processAlgorithm(self, progress):
def processAlgorithm(self, parameters, context, feedback):
"""Here is where the processing itself takes place."""
# The first thing to do is retrieve the values of the parameters
# entered by the user
map_theme = self.parameterAsString(
parameters,
self.MAP_THEME,
context)
layer = self.parameterAsLayer(
parameters,
self.LAYER,
context)
extent = self.parameterAsExtent(
parameters,
self.EXTENT,
context)
tile_size = self.parameterAsInt(
parameters,
self.TILE_SIZE,
context)
mupp = self.parameterAsDouble(
parameters,
self.MAP_UNITS_PER_PIXEL,
context)
output_layer = self.parameterAsOutputLayer(
parameters,
self.OUTPUT,
context)
tile_set = TileSet(map_theme, layer, extent, tile_size, mupp,
output_layer,
qgis.utils.iface.mapCanvas().mapSettings())
tile_set.render(feedback)
return {self.OUTPUT: output_layer}
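# Illustrative usage sketch: once the processing framework has registered this
# provider (assumed id "qgis"), the algorithm can be run with placeholder
# parameter values like these:
#
#   import processing
#   processing.run("qgis:rasterize", {
#       "EXTENT": "2660000,2680000,1240000,1260000 [EPSG:2056]",
#       "TILE_SIZE": 1024,
#       "MAP_UNITS_PER_PIXEL": 20,
#       "MAP_THEME": "My theme",
#       "LAYER": None,
#       "OUTPUT": "/tmp/map.tif",
#   })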
class TileSet():
"""
A set of tiles
"""
def __init__(self, map_theme, layer, extent, tile_size, mupp, output,
map_settings):
"""
:param map_theme:
:param extent:
:param layer:
:param tile_size:
:param mupp:
:param output:
:param map_settings: Map canvas map settings used for some fallback
values and CRS
"""
self.extent = extent
self.mupp = mupp
self.tile_size = tile_size
driver = self.getDriverForFile(output)
if not driver:
raise QgsProcessingException(
u'Could not load GDAL driver for file {}'.format(output))
crs = map_settings.destinationCrs()
self.x_tile_count = math.ceil(extent.width() / mupp / tile_size)
self.y_tile_count = math.ceil(extent.height() / mupp / tile_size)
xsize = self.x_tile_count * tile_size
ysize = self.y_tile_count * tile_size
self.dataset = driver.Create(output, xsize, ysize, 3) # 3 bands
self.dataset.SetProjection(str(crs.toWkt()))
self.dataset.SetGeoTransform(
[extent.xMinimum(), mupp, 0, extent.yMaximum(), 0, -mupp])
self.image = QImage(QSize(tile_size, tile_size), QImage.Format_ARGB32)
self.settings = QgsMapSettings()
self.settings.setOutputDpi(self.image.logicalDpiX())
self.settings.setOutputImageFormat(QImage.Format_ARGB32)
self.settings.setDestinationCrs(crs)
self.settings.setOutputSize(self.image.size())
self.settings.setFlag(QgsMapSettings.Antialiasing, True)
self.settings.setFlag(QgsMapSettings.RenderMapTile, True)
if QgsProject.instance().mapThemeCollection().hasMapTheme(map_theme):
self.settings.setLayers(
QgsProject.instance().mapThemeCollection(
).mapThemeVisibleLayers(
map_theme))
self.settings.setLayerStyleOverrides(
QgsProject.instance().mapThemeCollection(
).mapThemeStyleOverrides(
map_theme))
elif layer:
self.settings.setLayers([layer])
else:
self.settings.setLayers(map_settings.layers())
def render(self, feedback):
for x in range(self.x_tile_count):
for y in range(self.y_tile_count):
if feedback.isCanceled():
return
cur_tile = x * self.y_tile_count + y
num_tiles = self.x_tile_count * self.y_tile_count
self.renderTile(x, y, feedback)
feedback.setProgress(int((cur_tile / num_tiles) * 100))
def renderTile(self, x, y, feedback):
"""
Render one tile
:param x: The x index of the current tile
        :param y: The y index of the current tile
        :param feedback: processing feedback object used to report render errors
        """
painter = QPainter(self.image)
self.settings.setExtent(QgsRectangle(
self.extent.xMinimum() + x * self.mupp * self.tile_size,
self.extent.yMaximum() - (y + 1) * self.mupp * self.tile_size,
self.extent.xMinimum() + (x + 1) * self.mupp * self.tile_size,
self.extent.yMaximum() - y * self.mupp * self.tile_size))
job = QgsMapRendererCustomPainterJob(self.settings, painter)
job.renderSynchronously()
painter.end()
# Needs not to be deleted or Windows will kill it too early...
tmpfile = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
try:
self.image.save(tmpfile.name)
src_ds = osgeo.gdal.Open(tmpfile.name)
self.dataset.WriteRaster(x * self.tile_size, y * self.tile_size,
self.tile_size, self.tile_size,
src_ds.ReadRaster(0, 0, self.tile_size,
self.tile_size))
except Exception as e:
feedback.reportError(str(e))
finally:
del src_ds
tmpfile.close()
os.unlink(tmpfile.name)
def getDriverForFile(self, filename):
"""
Get the GDAL driver for a filename, based on its extension. (.gpkg,
.mbtiles...)
"""
_, extension = os.path.splitext(filename)
# If no extension is set, use .tif as default
if extension == '':
extension = '.tif'
driver_name = QgsRasterFileWriter.driverForExtension(extension[1:])
return osgeo.gdal.GetDriverByName(driver_name)
| gpl-2.0 |
Curter29/Vanilla-mod | applications/vanilla/modules/class.bookmarkedmodule.php | 2066 | <?php if (!defined('APPLICATION')) exit();
/*
Copyright 2008, 2009 Vanilla Forums Inc.
This file is part of Garden.
Garden is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
Garden is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Garden. If not, see <http://www.gnu.org/licenses/>.
Contact Vanilla Forums Inc. at support [at] vanillaforums [dot] com
*/
/**
* Renders recently active bookmarked discussions
*/
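// Illustrative usage sketch: a Garden controller would typically attach this
// module before rendering, e.g.
//   $this->AddModule('BookmarkedModule');
// AssetTarget() below places the rendered output in the Panel asset.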
class BookmarkedModule extends Gdn_Module {
public function GetData($Limit = 10) {
$this->Data = FALSE;
if (Gdn::Session()->IsValid() && C('Vanilla.Modules.ShowBookmarkedModule', TRUE)) {
$BookmarkIDs = Gdn::SQL()
->Select('DiscussionID')
->From('UserDiscussion')
->Where('UserID', Gdn::Session()->UserID)
->Where('Bookmarked', 1)
->Get()->ResultArray();
$BookmarkIDs = ConsolidateArrayValuesByKey($BookmarkIDs, 'DiscussionID');
if (count($BookmarkIDs)) {
$DiscussionModel = new DiscussionModel();
DiscussionModel::CategoryPermissions();
$DiscussionModel->SQL->WhereIn('d.DiscussionID', $BookmarkIDs);
$this->Data = $DiscussionModel->Get(
0,
$Limit
);
} else {
$this->Data = FALSE;
}
}
}
public function AssetTarget() {
return 'Panel';
}
public function ToString() {
if (!isset($this->Data))
$this->GetData();
if (is_object($this->Data) && $this->Data->NumRows() > 0)
return parent::ToString();
return '';
}
} | gpl-2.0 |
judelvis/jpa-garte | system/js/panel/principal.js | 2919 | /**
 * Developed by: Judelvis Antonio Rivas Perdomo
 * Created: 09 November 2014
*/
$(function() {
$('#tabs').tabs();
listar_pendientes();
listar_procesando();
listar_procesado();
listar_rechazo_cliente();
listar_rechazo_admin();
});
function listar_pendientes(){
var datos = "estatus=0&panel=1";
$("#resp1").html('');
	//alert(sUrlP);
$.ajax({
url : sUrlP + "listar_pedidos_pendientes",
data: datos,
type : "POST",
dataType : "json",
success : function(json) {//alert(json);
if(json['resp']==1){
var Grid1 = new TGrid(json, 'resp1','Pedidos Pendientes por Depositar');
Grid1.SetNumeracion(true);
Grid1.SetName("PDepositar");
Grid1.SetDetalle();
Grid1.Generar();
}else $("#resp1").html("No posee Pedidos Pendientes por Depositar");
}
});
}
function listar_procesando(){
var datos = "estatus=1";
$("#resp2").html('');
//alert(sUrlP + "listar_pedidos_cliente");
$.ajax({
url : sUrlP + "listar_pedidos_cliente",
data: datos,
type : "POST",
dataType : "json",
success : function(json) {//alert(json);
if(json['resp']==1){
var Grid2 = new TGrid(json, 'resp2','Pedidos Pendientes por Aprobar');
Grid2.SetNumeracion(true);
Grid2.SetName("procesando");
Grid2.SetDetalle();
Grid2.Generar();
}else $("#resp2").html("No posee Pedidos Pendientes por Aprobar");
}
});
}
function listar_procesado(){
var datos = "estatus=2";
$("#resp3").html('');
	//alert(sUrlP);
$.ajax({
url : sUrlP + "listar_pedidos_cliente",
data: datos,
type : "POST",
dataType : "json",
success : function(json) {//alert(json);
if(json['resp']==1){
var Grid3 = new TGrid(json, 'resp3','Pedidos Aprobados');
Grid3.SetNumeracion(true);
Grid3.SetName("Procesado");
Grid3.SetDetalle();
Grid3.Generar();
}else $("#resp3").html("No posee Pedidos Aprobados");
}
});
}
function listar_rechazo_cliente(){
var datos = "estatus=3";
$("#resp4").html('');
$.ajax({
url : sUrlP + "listar_pedidos_cliente",
data: datos,
type : "POST",
dataType : "json",
success : function(json) {//alert(json);
if(json['resp']==1){
var Grid4 = new TGrid(json, 'resp4','Pedidos Rechazados Por Cliente');
Grid4.SetNumeracion(true);
Grid4.SetName("Rcliente");
Grid4.SetDetalle();
Grid4.Generar();
}else $("#resp4").html("No posee Pedidos Rechazados por Cliente");
}
});
}
function listar_rechazo_admin(){
var datos = "estatus=4";
$("#resp5").html('');
$.ajax({
url : sUrlP + "listar_pedidos_cliente",
data: datos,
type : "POST",
dataType : "json",
success : function(json) {//alert(json);
if(json['resp']==1){
var Grid5 = new TGrid(json, 'resp5','Pedidos rechazados por administrador');
Grid5.SetNumeracion(true);
Grid5.SetName("Radmin");
Grid5.SetDetalle();
Grid5.Generar();
}else $("#resp5").html("No posee Pedidos Rechazados por Administrador");
}
});
} | gpl-2.0 |
nachandr/cfme_tests | cfme/cloud/host_aggregates.py | 6202 | """ Page functions for Host Aggregates pages
"""
import attr
from navmazing import NavigateToAttribute
from widgetastic_patternfly import BootstrapNav
from widgetastic_patternfly import BreadCrumb
from widgetastic_patternfly import Button
from widgetastic_patternfly import Dropdown
from widgetastic_patternfly import View
from cfme.base.ui import BaseLoggedInPage
from cfme.common import Taggable
from cfme.common import TaggableCollection
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.providers import get_crud_by_name
from widgetastic_manageiq import Accordion
from widgetastic_manageiq import BaseEntitiesView
from widgetastic_manageiq import ItemsToolBarViewSelector
from widgetastic_manageiq import ManageIQTree
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import Search
from widgetastic_manageiq import SummaryTable
from widgetastic_manageiq import Text
class HostAggregatesView(BaseLoggedInPage):
@property
def in_host_aggregates(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Clouds', 'Host Aggregates']
)
class HostAggregatesToolBar(View):
policy = Dropdown('Policy')
download = Dropdown('Download')
configuration = Dropdown('Configuration')
view_selector = View.nested(ItemsToolBarViewSelector)
class HostAggregatesEntities(BaseEntitiesView):
pass
class HostAggregatesDetailsToolBar(View):
policy = Dropdown('Policy')
download = Button(title='Print or export summary')
configuration = Dropdown('Configuration')
class HostAggregatesDetailsAccordion(View):
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class HostAggregatesDetailsEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
properties = SummaryTable(title='Properties')
relationships = SummaryTable(title='Relationships')
smart_management = SummaryTable(title='Smart Management')
class HostAggregatesAllView(HostAggregatesView):
toolbar = HostAggregatesToolBar()
paginator = PaginationPane()
search = View.nested(Search)
including_entities = View.include(HostAggregatesEntities, use_parent=True)
@View.nested
class my_filters(Accordion): # noqa
ACCORDION_NAME = "My Filters"
navigation = BootstrapNav('.//div/ul')
tree = ManageIQTree()
@property
def is_displayed(self):
return (
self.in_host_aggregates and
self.entities.title.text == 'Host Aggregates')
class HostAggregatesDetailsView(HostAggregatesView):
@property
def is_displayed(self):
obj = self.context['object']
return (
self.in_host_aggregates and
self.entities.title.text == obj.expected_details_title and
self.entities.breadcrumb.active_location == obj.expected_details_breadcrumb and
self.entities.relationships.get_text_of('Cloud Provider') == obj.provider.name
)
toolbar = HostAggregatesDetailsToolBar()
sidebar = HostAggregatesDetailsAccordion()
entities = HostAggregatesDetailsEntities()
@attr.s
class HostAggregates(BaseEntity, Taggable):
"""
Host Aggregates class to support navigation
"""
_param_name = "HostAggregate"
name = attr.ib()
provider = attr.ib()
ram = attr.ib(default=None)
vcpus = attr.ib(default=None)
disk = attr.ib(default=None)
swap = attr.ib(default=None)
rxtx = attr.ib(default=None)
is_public = attr.ib(default=True)
tenant = attr.ib(default=None)
def refresh(self):
"""Refresh provider relationships and browser"""
self.provider.refresh_provider_relationships()
self.browser.refresh()
@property
def instance_count(self):
""" number of instances using host aggregates.
Returns:
:py:class:`int` instance count.
"""
view = navigate_to(self, 'Details')
return int(view.entities.relationships.get_text_of('Instances'))
@attr.s
class HostAggregatesCollection(BaseCollection, TaggableCollection):
ENTITY = HostAggregates
def all(self):
provider = self.filters.get('provider') # None if no filter, need for entity instantiation
view = navigate_to(self, 'All')
result = []
flavors = view.entities.get_all(surf_pages=True)
for flavor in flavors:
if provider is not None:
if flavor.data['cloud_provider'] == provider.name:
entity = self.instantiate(flavor.data['name'], provider)
else:
entity = self.instantiate(flavor.data['name'],
get_crud_by_name(flavor.data['cloud_provider']))
result.append(entity)
return result
@navigator.register(HostAggregatesCollection, 'All')
class HostAggregatesAll(CFMENavigateStep):
VIEW = HostAggregatesAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Compute', 'Clouds', 'Host Aggregates')
@navigator.register(HostAggregates, 'Details')
class HostAggregatesDetails(CFMENavigateStep):
VIEW = HostAggregatesDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.view_selector.select('List View')
try:
row = self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True)
except ItemNotFound:
raise ItemNotFound('Could not locate host aggregate "{}" on provider {}'
.format(self.obj.name, self.obj.provider.name))
row.click()
| gpl-2.0 |
xohm/SimpleOpenNI | dist/all/SimpleOpenNI/documentation/SimpleOpenNI/Vec3f.html | 8678 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.6.0_27) on Thu Aug 29 15:21:00 CEST 2013 -->
<title>Vec3f</title>
<meta name="date" content="2013-08-29">
<link rel="stylesheet" type="text/css" href="../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Vec3f";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../SimpleOpenNI/package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../index-all.html">Index</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../SimpleOpenNI/StrVector.html" title="class in SimpleOpenNI"><span class="strong">PREV CLASS</span></a></li>
<li>NEXT CLASS</li>
</ul>
<ul class="navList">
<li><a href="../index.html?SimpleOpenNI/Vec3f.html" target="_top">FRAMES</a></li>
<li><a href="Vec3f.html" target="_top">NO FRAMES</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>SUMMARY: </li>
<li>NESTED | </li>
<li>FIELD | </li>
<li><a href="#constructor_summary">CONSTR</a> | </li>
<li><a href="#method_summary">METHOD</a></li>
</ul>
<ul class="subNavList">
<li>DETAIL: </li>
<li>FIELD | </li>
<li><a href="#constructor_detail">CONSTR</a> | </li>
<li><a href="#method_detail">METHOD</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<p class="subTitle">SimpleOpenNI</p>
<h2 title="Class Vec3f" class="title">Class Vec3f</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>SimpleOpenNI.Vec3f</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public class <strong>Vec3f</strong>
extends java.lang.Object</pre>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><strong><a href="../SimpleOpenNI/Vec3f.html#Vec3f(float, float, float)">Vec3f</a></strong>(float x,
float y,
float z)</code> </td>
</tr>
<tr class="rowColor">
<td class="colOne"><code><strong><a href="../SimpleOpenNI/Vec3f.html#Vec3f(SimpleOpenNI.Vec3f)">Vec3f</a></strong>(<a href="../SimpleOpenNI/Vec3f.html" title="class in SimpleOpenNI">Vec3f</a> copy)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../SimpleOpenNI/Vec3f.html#delete()">delete</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>float</code></td>
<td class="colLast"><code><strong><a href="../SimpleOpenNI/Vec3f.html#x()">x</a></strong>()</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>float</code></td>
<td class="colLast"><code><strong><a href="../SimpleOpenNI/Vec3f.html#y()">y</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>float</code></td>
<td class="colLast"><code><strong><a href="../SimpleOpenNI/Vec3f.html#z()">z</a></strong>()</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="Vec3f(float, float, float)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>Vec3f</h4>
<pre>public Vec3f(float x,
float y,
float z)</pre>
</li>
</ul>
<a name="Vec3f(SimpleOpenNI.Vec3f)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>Vec3f</h4>
<pre>public Vec3f(<a href="../SimpleOpenNI/Vec3f.html" title="class in SimpleOpenNI">Vec3f</a> copy)</pre>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="delete()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>delete</h4>
<pre>public void delete()</pre>
</li>
</ul>
<a name="x()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>x</h4>
<pre>public float x()</pre>
</li>
</ul>
<a name="y()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>y</h4>
<pre>public float y()</pre>
</li>
</ul>
<a name="z()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>z</h4>
<pre>public float z()</pre>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../SimpleOpenNI/package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../index-all.html">Index</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../SimpleOpenNI/StrVector.html" title="class in SimpleOpenNI"><span class="strong">PREV CLASS</span></a></li>
<li>NEXT CLASS</li>
</ul>
<ul class="navList">
<li><a href="../index.html?SimpleOpenNI/Vec3f.html" target="_top">FRAMES</a></li>
<li><a href="Vec3f.html" target="_top">NO FRAMES</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>SUMMARY: </li>
<li>NESTED | </li>
<li>FIELD | </li>
<li><a href="#constructor_summary">CONSTR</a> | </li>
<li><a href="#method_summary">METHOD</a></li>
</ul>
<ul class="subNavList">
<li>DETAIL: </li>
<li>FIELD | </li>
<li><a href="#constructor_detail">CONSTR</a> | </li>
<li><a href="#method_detail">METHOD</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| gpl-2.0 |
cyjseagull/SHMA | zsim-nvmain/pin_kit/source/tools/SimpleExamples/flowgraph.py | 13531 | #! /usr/bin/env python
# @ORIGINAL_AUTHOR: Robert Muth
#
# python.org has useful info about the Python programming language
#
# The Python library is described here: http://docs.python.org/lib/lib.html
# And the index for the library is here: http://docs.python.org/lib/genindex.html
import sys
import os
import getopt
import re
import string
import copy
#######################################################################
# Version
#######################################################################
def Version():
(l,v,x) = string.split('$Revision: 1.5 $')
return v
#######################################################################
# Usage
#######################################################################
def Usage():
print "Usage: flowgraph.py [OPTION]+ assembler-listing edge-profile"
print
print "flowgraph converts a disassembled routine into a flowgraph which can be rendered using vcg"
print
print "assembler-listing is a textual disassembler listing generated with"
print "objdump-routine.csh or directly with objdump"
print
print "edge-profile is a profile generated with the edgcnt Pin tool"
return -1
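# Illustrative invocation (file names are hypothetical, not fixed by the tool):
#   objdump -d ./a.out > routine.dis          # or use objdump-routine.csh
#   ./flowgraph.py routine.dis edges.out > cfg.vcg
# The emitted cfg.vcg can then be rendered with a VCG viewer such as xvcg.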
#######################################################################
# Messages
#######################################################################
def Info(str):
print >> sys.stderr,"I:",str
return
def Warning(str):
print >> sys.stderr,"W:", str
return
def Error(str):
print >> sys.stderr, "E:",str
sys.exit(-1)
#######################################################################
#
#######################################################################
# 402d05: 41 56 push %r14
PatternNoFallthrough = re.compile(r'call|ret|jmp')
PatternCall = re.compile(r'call')
class INS:
def __init__(self, addr, opcode ):
self._addr = addr
self._opcode = opcode
self._next = None
self._leader = 0
self._bbl = None
return
def get_opcode(self):
return self._opcode
def set_next(self,next):
self._next = next
return
def get_next(self):
return self._next
def get_addr(self):
return self._addr
def get_leader(self):
return self._leader
def set_leader(self,leader):
self._leader = leader
def get_bbl(self):
return self._bbl
def set_bbl(self,bbl):
self._bbl = bbl
def has_no_fallthru(self):
return PatternNoFallthrough.search(self._opcode)
def is_call(self):
return PatternCall.search(self._opcode)
#######################################################################
##
#######################################################################
ALL_INS = {}
PatternAssemler = re.compile(r'^\s*([0-9a-fA-F]+):\s*(?:[0-9a-fA-F][0-9a-fA-F] )+\s*(.+)$')
def ProcessAssemblerListing(lines):
last_ins = None
for l in lines:
match = PatternAssemler.match(l)
if not match:
# print "bad line ",l
continue
addr = long(match.group(1),16)
ins = INS( addr, match.group(2) )
ALL_INS[addr] = ins
if last_ins:
last_ins.set_next(ins)
last_ins = ins
return
#######################################################################
# 0x0000000000400366 0x0000000000402300 2182
PatternEdge2 = re.compile(r'^\s*0x([0-9a-fA-F]+)\s+0x([0-9a-fA-F]+)\s+([0-9]+)\s*$')
PatternEdge3 = re.compile(r'^\s*0x([0-9a-fA-F]+)\s+0x([0-9a-fA-F]+)\s+([a-zA-Z])\s+([0-9]+)\s*$')
def ProcessEdgProfile(lines):
version = string.split(lines[0])
if version[0] != "EDGCOUNT":
Error("files is not an edge profile")
if version[1] == "2.0":
v = 2
elif version[1] == "3.0":
v = 3
else:
Error("unsupported edge profile version")
edg_list = []
for l in lines[1:]:
if v == 2:
match = PatternEdge2.match(l)
elif v==3:
match = PatternEdge3.match(l)
if not match: continue
if v == 2:
src = long(match.group(1),16)
dst = long(match.group(2),16)
count = long(match.group(3))
type = "u"
elif v == 3:
src = long(match.group(1),16)
dst = long(match.group(2),16)
type = match.group(3)
count = long(match.group(4))
if ALL_INS.has_key(src):
next = ALL_INS[src].get_next()
if next: next.set_leader(1)
if ALL_INS.has_key(dst):
ins = ALL_INS[dst]
ins.set_leader(1)
if ALL_INS.has_key(src) or ALL_INS.has_key(dst):
edg_list.append( (src,dst,count,type) )
return edg_list
#######################################################################
#
#######################################################################
class EDG:
def __init__(self,src,dst,count, type):
self._src = src
self._dst = dst
self._count = count
self._type = type
return
def is_fallthru(self):
return self._fallthru
def StringVCG(self, threshold = 100000000000L):
s = ""
if self._count > threshold:
s += "\t" + "nearedge:\n"
else:
s += "\t" + "edge:\n"
s += "\t{\n"
s += "\t\t" + "sourcename: \"" + hex(self._src._start) + "\"\n"
s += "\t\t" + "targetname: \"" + hex(self._dst._start) + "\"\n"
if self._type == "F" or self._type == "L":
s += "\t\t" + "thickness: 4\n"
else:
s += "\t\t" + "thickness: 2\n"
s += "\t\t" + "label: \"%s(%d)\"\n" % (self._type,self._count)
# s += "\t\t" + "priority: %d\n" % self._count
s += "\t}\n"
return s
#######################################################################
class BBL:
def __init__(self,start):
self._start = start
self._ins = []
self._in = []
self._out = []
self._count = 0
self._in_count = 0
self._out_count = 0
self._next = None
return
def add_ins(self,ins):
self._ins.append(ins)
self._end = ins.get_addr()
return
def set_count(self,count):
assert( self._count == 0 )
self._count = count
return
def add_out_edg(self, edg ):
self._out.append(edg)
return
def add_in_edg(self, edg ):
self._in.append(edg)
return
def add_in_count(self, count ):
self._in_count += count
return
def add_out_count(self, count ):
self._out_count += count
return
def count_in(self):
count = self._in_count
for e in self._in: count += e._count
return count
def count_out(self):
count = self._out_count
for e in self._out: count += e._count
return count
def set_next(self,next):
self._next = next
return
def get_next(self):
return self._next
def get_start(self):
return self._start
def is_call(self):
return self._ins[-1].is_call()
def has_no_fallthru(self):
return self._ins[-1].has_no_fallthru()
def String(self):
s = "BBL at %x count %d (i: %d o: %d)\n" % (self._start, self._count, self._in_count, self._out_count)
s += "i: "
for edg in self._in:
s += "%x (%d) " % (edg._src.get_start(),edg._count)
s += "\n"
s += "o: "
for edg in self._out:
s += "%x (%d) " % (edg._dst.get_start(),edg._count)
s += "\n"
for ins in self._ins:
s += "%x %s\n" % (ins.get_addr(),ins.get_opcode())
return s
def StringVCG(self,threshold=1000):
s = "\t" + "node:\n"
s += "\t" + "{\n"
if self._count > threshold:
s += "\t\t" + "color: red\n"
s += "\t\t" + "title: \"" + hex(self._start) + "\"\n"
s += "\t\t" + "label: \"" + hex(self._start) + " (" + str(self._count) + ")\\n"
for ins in self._ins: s += "%x: %s\\n" % (ins.get_addr(),ins.get_opcode())
s += "\"\n"
s += "\t" + "}\n"
return s
#######################################################################
#
#######################################################################
ALL_BBL = {}
ALL_EDG = []
#######################################################################
#
#######################################################################
def CreateCFG(edg_list):
no_interproc_edges = 1
ins_list = ALL_INS.items()
ins_list.sort() # by addr
bbl_list = []
Info("BBL create")
last = None
for (a,ins) in ins_list:
if ins.get_leader():
start = ins.get_addr()
bbl = BBL(start)
bbl_list.append(bbl)
ALL_BBL[start] = bbl
if last: last.set_next( bbl )
last = bbl
last.add_ins( ins )
ins.set_bbl( last )
if ins.has_no_fallthru():
next = ins.get_next()
if next: next.set_leader(1)
Info( "Created %d bbls" % len(bbl_list))
# for bbl in bbl_list: print bbl.String()
Info( "EDG create")
for (src,dst,count,type) in edg_list:
if ALL_INS.has_key(src):
bbl_src = ALL_INS[src].get_bbl()
else:
assert( ALL_BBL.has_key(dst) )
if no_interproc_edges:
ALL_BBL[dst].add_in_count(count)
continue
bbl_src = BBL(src)
ALL_BBL[src] = bbl_src
if ALL_BBL.has_key(dst):
bbl_dst = ALL_BBL[dst]
else:
if no_interproc_edges:
bbl_src.add_out_count(count)
continue
bbl_dst = BBL(dst)
ALL_BBL[dst] = bbl_dst
edg = EDG( bbl_src, bbl_dst, count, type)
ALL_EDG.append( edg )
bbl_src.add_out_edg( edg )
bbl_dst.add_in_edg( edg )
Info("propagate counts and add fallthrus")
for bbl in bbl_list:
count = bbl.count_in()
bbl.set_count(count)
count -= bbl.count_out()
if count < 0:
Warning("negative fallthru count")
count = 0
next = bbl.get_next()
if count > 0:
if bbl.has_no_fallthru():
Info("losing flow %d\n" % count)
elif next:
edg = EDG(bbl,next,count,"F")
ALL_EDG.append( edg )
bbl.add_out_edg( edg )
next.add_in_edg( edg )
if bbl.is_call() and next:
edg = EDG(bbl,next, 0,"L")
ALL_EDG.append( edg )
bbl.add_out_edg( edg )
next.add_in_edg( edg )
# for bbl in bbl_list: print bbl.String()
return bbl_list
def DumpVCG():
start = 0
end = 0
print "// ###################################################################################"
print "// VCG Flowgraph for %x - %x" % (start,end)
print "// ###################################################################################"
print "graph:"
print "{";
print "title: \"Control Flow Graph for rtn %x - %x \"" % (start,end);
print "label: \"Control Flow Graph for rtn %x - %x \"" % (start,end);
print "display_edge_labels: yes"
print "layout_downfactor: 100"
print "layout_nearfactor: 10"
print "layout_upfactor: 1"
# print "dirty_edge_labels: yes"
print "layout_algorithm: mindepth"
print "manhatten_edges: yes"
print "edge.arrowsize: 15"
print "late_edge_labels: yes"
for e in ALL_EDG:
print e.StringVCG()
bbl_list = ALL_BBL.items()
bbl_list.sort()
for (x,b) in bbl_list:
print b.StringVCG()
print "}";
print "// eof"
return
#######################################################################
# Main
#######################################################################
def Main(argv):
if len(argv) != 2:
Usage()
return -1
Info( "Reading listing")
filename = argv[0]
try:
input = open(filename, "r")
lines = input.readlines()
input.close()
except:
Error("cannot read data " + filename)
ProcessAssemblerListing(lines)
Info( "Reading edges")
filename = argv[1]
try:
input = open(filename, "r")
lines = input.readlines()
input.close()
except:
Error("cannot read data " + filename)
edg_list = ProcessEdgProfile(lines)
Info("Read %d edges" % len(edg_list))
bbl_list = CreateCFG( edg_list)
Info("Dump VCG to stdout")
DumpVCG()
return 0
#######################################################################
#
#######################################################################
if __name__ == "__main__":
sys.exit( Main( sys.argv[1:]) )
#######################################################################
# eof
#######################################################################
| gpl-2.0 |
erykrutkowski/Steghide-GUI | steghide-src/JpegSampleValue.cc | 2573 | /*
* steghide 0.5.1 - a steganography program
* Copyright (C) 1999-2003 Stefan Hetzl <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <cstdlib>
#include <cmath>
#include "common.h"
#include "JpegSampleValue.h"
JpegSampleValue::JpegSampleValue (int c)
: SampleValue(), DctCoeff (c)
{
Key = (UWORD32) DctCoeff ;
EValue = calcEValue (DctCoeff) ;
}
SampleValue *JpegSampleValue::getNearestTargetSampleValue (EmbValue t) const
{
SWORD16 minvalue = 0, maxvalue = 0 ;
if (DctCoeff > 0) {
minvalue = 1 ;
maxvalue = SWORD16_MAX ;
}
else if (DctCoeff < 0) {
minvalue = SWORD16_MIN ;
maxvalue = -1 ;
}
else {
myassert(false) ;
}
SWORD16 dctc_up = DctCoeff, dctc_down = DctCoeff, dctc_new = 0 ;
bool found = false ;
do {
if (dctc_up < maxvalue) {
dctc_up++ ;
}
if (dctc_down > minvalue) {
dctc_down-- ;
}
if (calcEValue(dctc_up) == t && calcEValue(dctc_down) == t) {
if (RndSrc.getBool()) {
dctc_new = dctc_up ;
}
else {
dctc_new = dctc_down ;
}
found = true ;
}
else if (calcEValue(dctc_up) == t) {
dctc_new = dctc_up ;
found = true ;
}
else if (calcEValue(dctc_down) == t) {
dctc_new = dctc_down ;
found = true ;
}
} while (!found) ;
return ((SampleValue *) new JpegSampleValue (dctc_new)) ;
}
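/* Illustrative walk-through of the search above (the embedded values produced
 by calcEValue() are not specified here, so this is only a sketch): starting
 from DctCoeff = 5 with target t, the candidates 6 and 4 are examined first,
 then 7 and 3, and so on. The min/max bounds keep the coefficient's sign, so
 the search never crosses zero, and ties between the upper and lower candidate
 are broken randomly via RndSrc.getBool(). */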
UWORD32 JpegSampleValue::calcDistance (const SampleValue *s) const
{
const JpegSampleValue *sample = (const JpegSampleValue*) s ;
/* If s is not a JpegSampleValue then we get into real trouble here.
But calcDistance is called very often, a dynamic_cast costs a lot of time and
it does not make sense to pass anything but a JpegSampleValue as s anyway. */
int d = DctCoeff - sample->DctCoeff ;
return ((d >= 0) ? ((UWORD32) d) : ((UWORD32) -d)) ;
}
std::string JpegSampleValue::getName (void) const
{
char buf[128] ;
sprintf (buf, "%d", DctCoeff) ;
return std::string (buf) ;
}
| gpl-2.0 |
gui2dev/android_kernel_motorola_tinboost | net/wireless/lib80211.c | 7087 | /*
* lib80211 -- common bits for IEEE802.11 drivers
*
* Copyright(c) 2008 John W. Linville <[email protected]>
*
* Portions copied from old ieee80211 component, w/ original copyright
* notices below:
*
* Host AP crypto routines
*
* Copyright (c) 2002-2003, Jouni Malinen <[email protected]>
* Portions Copyright (C) 2004, Intel Corporation <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/ieee80211.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/lib80211.h>
#define DRV_NAME "lib80211"
#define DRV_DESCRIPTION "common routines for IEEE802.11 drivers"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("John W. Linville <[email protected]>");
MODULE_LICENSE("GPL");
struct lib80211_crypto_alg {
struct list_head list;
struct lib80211_crypto_ops *ops;
};
static LIST_HEAD(lib80211_crypto_algs);
static DEFINE_SPINLOCK(lib80211_crypto_lock);
const char *print_ssid(char *buf, const char *ssid, u8 ssid_len)
{
const char *s = ssid;
char *d = buf;
ssid_len = min_t(u8, ssid_len, IEEE80211_MAX_SSID_LEN);
while (ssid_len--) {
if (isprint(*s)) {
*d++ = *s++;
continue;
}
*d++ = '\\';
if (*s == '\0')
*d++ = '0';
else if (*s == '\n')
*d++ = 'n';
else if (*s == '\r')
*d++ = 'r';
else if (*s == '\t')
*d++ = 't';
else if (*s == '\\')
*d++ = '\\';
else
d += snprintf(d, 3, "%03o", *s);
s++;
}
*d = '\0';
return buf;
}
EXPORT_SYMBOL(print_ssid);
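/* Sizing note derived from the loop above (not from a documented contract):
 * each input byte advances the output by at most four characters ('\' plus a
 * three-digit octal escape), so a caller buffer of IEEE80211_MAX_SSID_LEN * 4 + 1
 * bytes is always sufficient, e.g.:
 *
 *	char buf[IEEE80211_MAX_SSID_LEN * 4 + 1];
 *	printk(KERN_DEBUG "ssid=%s\n", print_ssid(buf, ssid, ssid_len));
 */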
int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
spinlock_t *lock)
{
memset(info, 0, sizeof(*info));
info->name = name;
info->lock = lock;
INIT_LIST_HEAD(&info->crypt_deinit_list);
setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
(unsigned long)info);
return 0;
}
EXPORT_SYMBOL(lib80211_crypt_info_init);
void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
{
int i;
lib80211_crypt_quiescing(info);
del_timer_sync(&info->crypt_deinit_timer);
lib80211_crypt_deinit_entries(info, 1);
for (i = 0; i < NUM_WEP_KEYS; i++) {
struct lib80211_crypt_data *crypt = info->crypt[i];
if (crypt) {
if (crypt->ops) {
crypt->ops->deinit(crypt->priv);
module_put(crypt->ops->owner);
}
kfree(crypt);
info->crypt[i] = NULL;
}
}
}
EXPORT_SYMBOL(lib80211_crypt_info_free);
void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
{
struct lib80211_crypt_data *entry, *next;
unsigned long flags;
spin_lock_irqsave(info->lock, flags);
list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) {
if (atomic_read(&entry->refcnt) != 0 && !force)
continue;
list_del(&entry->list);
if (entry->ops) {
entry->ops->deinit(entry->priv);
module_put(entry->ops->owner);
}
kfree(entry);
}
spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(lib80211_crypt_deinit_entries);
/* After this, crypt_deinit_list won't accept new members */
void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
{
unsigned long flags;
spin_lock_irqsave(info->lock, flags);
info->crypt_quiesced = 1;
spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(lib80211_crypt_quiescing);
void lib80211_crypt_deinit_handler(unsigned long data)
{
struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
lib80211_crypt_deinit_entries(info, 0);
spin_lock_irqsave(info->lock, flags);
if (!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
"deletion list\n", info->name);
info->crypt_deinit_timer.expires = jiffies + HZ;
add_timer(&info->crypt_deinit_timer);
}
spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(lib80211_crypt_deinit_handler);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt)
{
struct lib80211_crypt_data *tmp;
unsigned long flags;
if (*crypt == NULL)
return;
tmp = *crypt;
*crypt = NULL;
/* must not run ops->deinit() while there may be pending encrypt or
* decrypt operations. Use a list of delayed deinits to avoid needing
* locking. */
spin_lock_irqsave(info->lock, flags);
if (!info->crypt_quiesced) {
list_add(&tmp->list, &info->crypt_deinit_list);
if (!timer_pending(&info->crypt_deinit_timer)) {
info->crypt_deinit_timer.expires = jiffies + HZ;
add_timer(&info->crypt_deinit_timer);
}
}
spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(lib80211_crypt_delayed_deinit);
int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct lib80211_crypto_alg *alg;
alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
alg->ops = ops;
spin_lock_irqsave(&lib80211_crypto_lock, flags);
list_add(&alg->list, &lib80211_crypto_algs);
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n",
ops->name);
return 0;
}
EXPORT_SYMBOL(lib80211_register_crypto_ops);
int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
{
struct lib80211_crypto_alg *alg;
unsigned long flags;
spin_lock_irqsave(&lib80211_crypto_lock, flags);
list_for_each_entry(alg, &lib80211_crypto_algs, list) {
if (alg->ops == ops)
goto found;
}
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
return -EINVAL;
found:
printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n",
ops->name);
list_del(&alg->list);
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
kfree(alg);
return 0;
}
EXPORT_SYMBOL(lib80211_unregister_crypto_ops);
struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name)
{
struct lib80211_crypto_alg *alg;
unsigned long flags;
spin_lock_irqsave(&lib80211_crypto_lock, flags);
list_for_each_entry(alg, &lib80211_crypto_algs, list) {
if (strcmp(alg->ops->name, name) == 0)
goto found;
}
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
return NULL;
found:
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
return alg->ops;
}
EXPORT_SYMBOL(lib80211_get_crypto_ops);
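/* Illustrative driver-side flow (a sketch, not copied from any in-tree user):
 *
 *	struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops("NULL");
 *	void *priv = NULL;
 *	if (ops && try_module_get(ops->owner))
 *		priv = ops->init(keyidx);
 *	...
 *	// tear-down should go through lib80211_crypt_delayed_deinit() so that
 *	// in-flight encrypt/decrypt calls can drain before ops->deinit() runs.
 */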
static void *lib80211_crypt_null_init(int keyidx)
{
return (void *)1;
}
static void lib80211_crypt_null_deinit(void *priv)
{
}
static struct lib80211_crypto_ops lib80211_crypt_null = {
.name = "NULL",
.init = lib80211_crypt_null_init,
.deinit = lib80211_crypt_null_deinit,
.owner = THIS_MODULE,
};
static int __init lib80211_init(void)
{
pr_info(DRV_DESCRIPTION "\n");
return lib80211_register_crypto_ops(&lib80211_crypt_null);
}
static void __exit lib80211_exit(void)
{
lib80211_unregister_crypto_ops(&lib80211_crypt_null);
BUG_ON(!list_empty(&lib80211_crypto_algs));
}
module_init(lib80211_init);
module_exit(lib80211_exit);
| gpl-2.0 |
mattstock/binutils-bexkat1 | bfd/hpux-core.c | 13408 | /* BFD back-end for HP/UX core files.
Copyright (C) 1993-2020 Free Software Foundation, Inc.
Written by Stu Grossman, Cygnus Support.
Converted to back-end form by Ian Lance Taylor, Cygnus Support
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA. */
/* This file can only be compiled on systems which use HP/UX style
core files. */
#include "sysdep.h"
#include "bfd.h"
#include "libbfd.h"
#if defined (HOST_HPPAHPUX) || defined (HOST_HPPAMPEIX)
/* FIXME: sys/core.h doesn't exist for HPUX version 7. HPUX version
5, 6, and 7 core files seem to be standard trad-core.c type core
files; can we just use trad-core.c in addition to this file? */
#include <sys/core.h>
#include <sys/utsname.h>
#endif /* HOST_HPPAHPUX */
#ifdef HOST_HPPABSD
/* Not a very swift place to put it, but that's where the BSD port
puts them. */
#include "/hpux/usr/include/sys/core.h"
#endif /* HOST_HPPABSD */
#include <sys/param.h>
#ifdef HAVE_DIRENT_H
# include <dirent.h>
#else
# ifdef HAVE_SYS_NDIR_H
# include <sys/ndir.h>
# endif
# ifdef HAVE_SYS_DIR_H
# include <sys/dir.h>
# endif
# ifdef HAVE_NDIR_H
# include <ndir.h>
# endif
#endif
#include <signal.h>
#ifdef HPUX_CORE
#include <machine/reg.h>
#endif
#include <sys/file.h>
/* Kludge: There's no explicit mechanism provided by sys/core.h to
conditionally know whether a proc_info has thread id fields.
However, CORE_ANON_SHMEM shows up first at 10.30, which is
happily also when meaningful thread id's show up in proc_info. */
#if defined(CORE_ANON_SHMEM)
#define PROC_INFO_HAS_THREAD_ID (1)
#endif
/* This type appears at HP-UX 10.30. Defining it if not defined
by sys/core.h allows us to build for older HP-UX's, and (since
it won't be encountered in core-dumps from older HP-UX's) is
harmless. */
#if !defined(CORE_ANON_SHMEM)
#define CORE_ANON_SHMEM 0x00000200 /* anonymous shared memory */
#endif
/* These are stored in the bfd's tdata */
/* .lwpid and .user_tid are only valid if PROC_INFO_HAS_THREAD_ID, else they
are set to 0. Also, until HP-UX implements MxN threads, .user_tid and
.lwpid are synonymous. */
struct hpux_core_struct
{
int sig;
int lwpid; /* Kernel thread ID. */
unsigned long user_tid; /* User thread ID. */
char cmd[MAXCOMLEN + 1];
};
#define core_hdr(bfd) ((bfd)->tdata.hpux_core_data)
#define core_signal(bfd) (core_hdr(bfd)->sig)
#define core_command(bfd) (core_hdr(bfd)->cmd)
#define core_kernel_thread_id(bfd) (core_hdr(bfd)->lwpid)
#define core_user_thread_id(bfd) (core_hdr(bfd)->user_tid)
#define hpux_core_core_file_matches_executable_p generic_core_file_matches_executable_p
#define hpux_core_core_file_pid _bfd_nocore_core_file_pid
static asection *make_bfd_asection (bfd *, const char *, flagword,
bfd_size_type, bfd_vma, unsigned int);
static bfd_cleanup hpux_core_core_file_p (bfd *);
static char *hpux_core_core_file_failing_command (bfd *);
static int hpux_core_core_file_failing_signal (bfd *);
static void swap_abort (void);
static asection *
make_bfd_asection (bfd *abfd, const char *name, flagword flags,
bfd_size_type size, bfd_vma vma,
unsigned int alignment_power)
{
asection *asect;
char *newname;
newname = bfd_alloc (abfd, (bfd_size_type) strlen (name) + 1);
if (!newname)
return NULL;
strcpy (newname, name);
asect = bfd_make_section_anyway_with_flags (abfd, newname, flags);
if (!asect)
return NULL;
asect->size = size;
asect->vma = vma;
asect->filepos = bfd_tell (abfd);
asect->alignment_power = alignment_power;
return asect;
}
/* Return true if the given core file section corresponds to a thread,
based on its name. */
static int
thread_section_p (bfd *abfd ATTRIBUTE_UNUSED,
asection *sect,
void *obj ATTRIBUTE_UNUSED)
{
return CONST_STRNEQ (sect->name, ".reg/");
}
/* this function builds a bfd target if the file is a corefile.
It returns null or 0 if it finds out that it is not a core file.
The way it checks this is by looking for allowed 'type' field values.
These are declared in sys/core.h
There are some values which are 'reserved for future use'. In particular
CORE_NONE is actually defined as 0. This may be a catch-all for cases
in which the core file is generated by some non-hpux application.
(I am just guessing here!)
*/
static bfd_cleanup
hpux_core_core_file_p (bfd *abfd)
{
int good_sections = 0;
int unknown_sections = 0;
core_hdr (abfd) = (struct hpux_core_struct *)
bfd_zalloc (abfd, (bfd_size_type) sizeof (struct hpux_core_struct));
if (!core_hdr (abfd))
return NULL;
while (1)
{
int val;
struct corehead core_header;
val = bfd_bread ((void *) &core_header,
(bfd_size_type) sizeof core_header, abfd);
if (val <= 0)
break;
switch (core_header.type)
{
case CORE_KERNEL:
case CORE_FORMAT:
/* Just skip this. */
bfd_seek (abfd, (file_ptr) core_header.len, SEEK_CUR);
good_sections++;
break;
case CORE_EXEC:
{
struct proc_exec proc_exec;
if (bfd_bread ((void *) &proc_exec, (bfd_size_type) core_header.len,
abfd) != core_header.len)
break;
strncpy (core_command (abfd), proc_exec.cmd, MAXCOMLEN + 1);
good_sections++;
}
break;
case CORE_PROC:
{
struct proc_info proc_info;
char secname[100]; /* Of arbitrary size, but plenty large. */
/* We need to read this section, 'cause we need to determine
whether the core-dumped app was threaded before we create
any .reg sections. */
if (bfd_bread (&proc_info, (bfd_size_type) core_header.len, abfd)
!= core_header.len)
break;
/* However, we also want to create those sections with the
file positioned at the start of the record, it seems. */
if (bfd_seek (abfd, -((file_ptr) core_header.len), SEEK_CUR) != 0)
break;
#if defined(PROC_INFO_HAS_THREAD_ID)
core_kernel_thread_id (abfd) = proc_info.lwpid;
core_user_thread_id (abfd) = proc_info.user_tid;
#else
core_kernel_thread_id (abfd) = 0;
core_user_thread_id (abfd) = 0;
#endif
/* If the program was unthreaded, then we'll just create a
.reg section.
If the program was threaded, then we'll create .reg/XXXXX
section for each thread, where XXXXX is a printable
representation of the kernel thread id. We'll also
create a .reg section for the thread that was running
and signalled at the time of the core-dump (i.e., this
is effectively an alias, needed to keep GDB happy.)
Note that we use `.reg/XXXXX' as opposed to '.regXXXXX'
because GDB expects that .reg2 will be the floating-
point registers. */
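/* For example (illustrative thread ids): a dump whose kernel threads
 are 12 and 17, with thread 17 taking the signal, ends up with
 sections ".reg/12", ".reg/17" and a ".reg" alias describing
 thread 17. */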
if (core_kernel_thread_id (abfd) == 0)
{
if (!make_bfd_asection (abfd, ".reg",
SEC_HAS_CONTENTS,
core_header.len,
(bfd_vma) offsetof (struct proc_info,
hw_regs),
2))
goto fail;
}
else
{
/* There are threads. Is this the one that caused the
core-dump? We'll claim it was the running thread. */
if (proc_info.sig != -1)
{
if (!make_bfd_asection (abfd, ".reg",
SEC_HAS_CONTENTS,
core_header.len,
(bfd_vma)offsetof (struct proc_info,
hw_regs),
2))
goto fail;
}
/* We always make one of these sections, for every thread. */
sprintf (secname, ".reg/%d", core_kernel_thread_id (abfd));
if (!make_bfd_asection (abfd, secname,
SEC_HAS_CONTENTS,
core_header.len,
(bfd_vma) offsetof (struct proc_info,
hw_regs),
2))
goto fail;
}
core_signal (abfd) = proc_info.sig;
if (bfd_seek (abfd, (file_ptr) core_header.len, SEEK_CUR) != 0)
break;
good_sections++;
}
break;
case CORE_DATA:
case CORE_STACK:
case CORE_TEXT:
case CORE_MMF:
case CORE_SHM:
case CORE_ANON_SHMEM:
if (!make_bfd_asection (abfd, ".data",
SEC_ALLOC + SEC_LOAD + SEC_HAS_CONTENTS,
core_header.len,
(bfd_vma) core_header.addr, 2))
goto fail;
bfd_seek (abfd, (file_ptr) core_header.len, SEEK_CUR);
good_sections++;
break;
case CORE_NONE:
/* Let's not punt if we encounter a section of unknown
type. Rather, let's make a note of it. If we later
see that there were also "good" sections, then we'll
declare that this is a core file, but we'll also warn that
it may be incompatible with this gdb.
*/
unknown_sections++;
break;
default:
goto fail; /*unrecognized core file type */
}
}
/* OK, we believe you. You're a core file (sure, sure). */
/* On HP/UX, we sometimes encounter core files where none of the threads
was found to be the running thread (ie the signal was set to -1 for
all threads). This happens when the program was aborted externally
via a TT_CORE ttrace system call. In that case, we just pick one
thread at random to be the active thread. */
if (core_kernel_thread_id (abfd) != 0
&& bfd_get_section_by_name (abfd, ".reg") == NULL)
{
asection *asect = bfd_sections_find_if (abfd, thread_section_p, NULL);
asection *reg_sect;
if (asect != NULL)
{
reg_sect = make_bfd_asection (abfd, ".reg", asect->flags,
asect->size, asect->vma,
asect->alignment_power);
if (reg_sect == NULL)
goto fail;
reg_sect->filepos = asect->filepos;
}
}
/* Were there sections of unknown type? If so, yet there were
at least some complete sections of known type, then, issue
a warning. Possibly the core file was generated on a version
of HP-UX that is incompatible with that for which this gdb was
built.
*/
if ((unknown_sections > 0) && (good_sections > 0))
_bfd_error_handler
("%pB appears to be a core file,\nbut contains unknown sections."
" It may have been created on an incompatible\nversion of HP-UX."
" As a result, some information may be unavailable.\n",
abfd);
return _bfd_no_cleanup;
fail:
bfd_release (abfd, core_hdr (abfd));
core_hdr (abfd) = NULL;
bfd_section_list_clear (abfd);
return NULL;
}
static char *
hpux_core_core_file_failing_command (bfd *abfd)
{
return core_command (abfd);
}
static int
hpux_core_core_file_failing_signal (bfd *abfd)
{
return core_signal (abfd);
}
/* If somebody calls any byte-swapping routines, shoot them. */
static void
swap_abort (void)
{
abort(); /* This way doesn't require any declaration for ANSI to fuck up */
}
#define NO_GET ((bfd_vma (*) (const void *)) swap_abort)
#define NO_PUT ((void (*) (bfd_vma, void *)) swap_abort)
#define NO_GETS ((bfd_signed_vma (*) (const void *)) swap_abort)
#define NO_GET64 ((bfd_uint64_t (*) (const void *)) swap_abort)
#define NO_PUT64 ((void (*) (bfd_uint64_t, void *)) swap_abort)
#define NO_GETS64 ((bfd_int64_t (*) (const void *)) swap_abort)
const bfd_target core_hpux_vec =
{
"hpux-core",
bfd_target_unknown_flavour,
BFD_ENDIAN_BIG, /* target byte order */
BFD_ENDIAN_BIG, /* target headers byte order */
(HAS_RELOC | EXEC_P | /* object flags */
HAS_LINENO | HAS_DEBUG |
HAS_SYMS | HAS_LOCALS | WP_TEXT | D_PAGED),
(SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC), /* section flags */
0, /* symbol prefix */
' ', /* ar_pad_char */
16, /* ar_max_namelen */
0, /* match priority. */
NO_GET64, NO_GETS64, NO_PUT64, /* 64 bit data */
NO_GET, NO_GETS, NO_PUT, /* 32 bit data */
NO_GET, NO_GETS, NO_PUT, /* 16 bit data */
NO_GET64, NO_GETS64, NO_PUT64, /* 64 bit hdrs */
NO_GET, NO_GETS, NO_PUT, /* 32 bit hdrs */
NO_GET, NO_GETS, NO_PUT, /* 16 bit hdrs */
{ /* bfd_check_format */
_bfd_dummy_target, /* unknown format */
_bfd_dummy_target, /* object file */
_bfd_dummy_target, /* archive */
hpux_core_core_file_p /* a core file */
},
{ /* bfd_set_format */
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error
},
{ /* bfd_write_contents */
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error,
_bfd_bool_bfd_false_error
},
BFD_JUMP_TABLE_GENERIC (_bfd_generic),
BFD_JUMP_TABLE_COPY (_bfd_generic),
BFD_JUMP_TABLE_CORE (hpux_core),
BFD_JUMP_TABLE_ARCHIVE (_bfd_noarchive),
BFD_JUMP_TABLE_SYMBOLS (_bfd_nosymbols),
BFD_JUMP_TABLE_RELOCS (_bfd_norelocs),
BFD_JUMP_TABLE_WRITE (_bfd_generic),
BFD_JUMP_TABLE_LINK (_bfd_nolink),
BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
NULL,
NULL /* backend_data */
};
| gpl-2.0 |
nchong/icliggghts | src/ASPHERE/fix_nve_asphere.cpp | 7251 | /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Mike Brown (SNL)
------------------------------------------------------------------------- */
#include "math.h"
#include "stdio.h"
#include "string.h"
#include "fix_nve_asphere.h"
#include "math_extra.h"
#include "atom.h"
#include "atom_vec.h"
#include "force.h"
#include "update.h"
#include "memory.h"
#include "error.h"
using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
FixNVEAsphere::FixNVEAsphere(LAMMPS *lmp, int narg, char **arg) :
FixNVE(lmp, narg, arg)
{
inertia =
memory->create_2d_double_array(atom->ntypes+1,3,"fix_nve_asphere:inertia");
// error checks
if (!atom->angmom_flag || !atom->quat_flag || !atom->torque_flag ||
!atom->avec->shape_type)
error->all("Fix nve/asphere requires atom attributes "
"angmom, quat, torque, shape");
if (atom->radius_flag || atom->rmass_flag)
error->all("Fix nve/asphere cannot be used with atom attributes "
"diameter or rmass");
}
/* ---------------------------------------------------------------------- */
FixNVEAsphere::~FixNVEAsphere()
{
memory->destroy_2d_double_array(inertia);
}
/* ---------------------------------------------------------------------- */
void FixNVEAsphere::init()
{
// check that all particles are finite-size
// no point particles allowed, spherical is OK
double **shape = atom->shape;
int *type = atom->type;
int *mask = atom->mask;
int nlocal = atom->nlocal;
if (igroup == atom->firstgroup) nlocal = atom->nfirst;
for (int i = 0; i < nlocal; i++)
if (mask[i] & groupbit)
if (shape[type[i]][0] == 0.0)
error->one("Fix nve/asphere requires extended particles");
FixNVE::init();
calculate_inertia();
}
/* ---------------------------------------------------------------------- */
void FixNVEAsphere::initial_integrate(int vflag)
{
double dtfm;
double **x = atom->x;
double **v = atom->v;
double **f = atom->f;
double **quat = atom->quat;
double **angmom = atom->angmom;
double **torque = atom->torque;
double *mass = atom->mass;
int *type = atom->type;
int *mask = atom->mask;
int nlocal = atom->nlocal;
if (igroup == atom->firstgroup) nlocal = atom->nfirst;
// set timestep here since dt may have changed or come via rRESPA
dtq = 0.5 * dtv;
for (int i = 0; i < nlocal; i++)
if (mask[i] & groupbit) {
dtfm = dtf / mass[type[i]];
v[i][0] += dtfm * f[i][0];
v[i][1] += dtfm * f[i][1];
v[i][2] += dtfm * f[i][2];
x[i][0] += dtv * v[i][0];
x[i][1] += dtv * v[i][1];
x[i][2] += dtv * v[i][2];
// update angular momentum by 1/2 step
// update quaternion a full step via Richardson iteration
// returns new normalized quaternion
angmom[i][0] += dtf * torque[i][0];
angmom[i][1] += dtf * torque[i][1];
angmom[i][2] += dtf * torque[i][2];
richardson(quat[i],angmom[i],inertia[type[i]]);
}
}
/* ---------------------------------------------------------------------- */
void FixNVEAsphere::final_integrate()
{
double dtfm;
double **v = atom->v;
double **f = atom->f;
double **angmom = atom->angmom;
double **torque = atom->torque;
double *mass = atom->mass;
int *type = atom->type;
int *mask = atom->mask;
int nlocal = atom->nlocal;
if (igroup == atom->firstgroup) nlocal = atom->nfirst;
for (int i = 0; i < nlocal; i++)
if (mask[i] & groupbit) {
dtfm = dtf / mass[type[i]];
v[i][0] += dtfm * f[i][0];
v[i][1] += dtfm * f[i][1];
v[i][2] += dtfm * f[i][2];
angmom[i][0] += dtf * torque[i][0];
angmom[i][1] += dtf * torque[i][1];
angmom[i][2] += dtf * torque[i][2];
}
}
/* ----------------------------------------------------------------------
Richardson iteration to update quaternion accurately
------------------------------------------------------------------------- */
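/* Sketch of the scheme implemented below (a reading of the code, not of any
   external reference): with dq/dt = (1/2) w q (quaternion product), one full
   step of size dt gives qfull, while two half steps -- re-evaluating omega
   from the half-step orientation -- give qhalf. Since the basic update is
   first order, the Richardson combination q_new = 2*qhalf - qfull cancels the
   leading error term, and the result is renormalized to remain a unit
   quaternion. */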
void FixNVEAsphere::richardson(double *q, double *m, double *moments)
{
// compute omega at 1/2 step from m at 1/2 step and q at 0
double w[3];
omega_from_mq(q,m,moments,w);
// full update from dq/dt = 1/2 w q
double wq[4];
MathExtra::multiply_vec_quat(w,q,wq);
double qfull[4];
qfull[0] = q[0] + dtq * wq[0];
qfull[1] = q[1] + dtq * wq[1];
qfull[2] = q[2] + dtq * wq[2];
qfull[3] = q[3] + dtq * wq[3];
MathExtra::normalize4(qfull);
// 1st half of update from dq/dt = 1/2 w q
double qhalf[4];
qhalf[0] = q[0] + 0.5*dtq * wq[0];
qhalf[1] = q[1] + 0.5*dtq * wq[1];
qhalf[2] = q[2] + 0.5*dtq * wq[2];
qhalf[3] = q[3] + 0.5*dtq * wq[3];
MathExtra::normalize4(qhalf);
// re-compute omega at 1/2 step from m at 1/2 step and q at 1/2 step
// recompute wq
omega_from_mq(qhalf,m,moments,w);
MathExtra::multiply_vec_quat(w,qhalf,wq);
// 2nd half of update from dq/dt = 1/2 w q
qhalf[0] += 0.5*dtq * wq[0];
qhalf[1] += 0.5*dtq * wq[1];
qhalf[2] += 0.5*dtq * wq[2];
qhalf[3] += 0.5*dtq * wq[3];
MathExtra::normalize4(qhalf);
// corrected Richardson update
q[0] = 2.0*qhalf[0] - qfull[0];
q[1] = 2.0*qhalf[1] - qfull[1];
q[2] = 2.0*qhalf[2] - qfull[2];
q[3] = 2.0*qhalf[3] - qfull[3];
MathExtra::normalize4(q);
}
/* ----------------------------------------------------------------------
compute omega from angular momentum
w = omega = angular velocity in space frame
wbody = angular velocity in body frame
project space-frame angular momentum onto body axes
and divide by principal moments
------------------------------------------------------------------------- */
void FixNVEAsphere::omega_from_mq(double *q, double *m, double *moments,
double *w)
{
double rot[3][3];
MathExtra::quat_to_mat(q,rot);
double wbody[3];
MathExtra::transpose_times_column3(rot,m,wbody);
wbody[0] /= moments[0];
wbody[1] /= moments[1];
wbody[2] /= moments[2];
MathExtra::times_column3(rot,wbody,w);
}
/* ----------------------------------------------------------------------
principal moments of inertia for ellipsoids
------------------------------------------------------------------------- */
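/* Formula being applied (assuming shape[i][0..2] are the per-type ellipsoid
   semi-axes a,b,c): for a solid ellipsoid of mass m,
   Ix = m*(b^2 + c^2)/5, Iy = m*(a^2 + c^2)/5, Iz = m*(a^2 + b^2)/5,
   which is exactly the 0.2*mass[i]*(...) expressions below. */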
void FixNVEAsphere::calculate_inertia()
{
double *mass = atom->mass;
double **shape = atom->shape;
for (int i = 1; i <= atom->ntypes; i++) {
inertia[i][0] = 0.2*mass[i] *
(shape[i][1]*shape[i][1]+shape[i][2]*shape[i][2]);
inertia[i][1] = 0.2*mass[i] *
(shape[i][0]*shape[i][0]+shape[i][2]*shape[i][2]);
inertia[i][2] = 0.2*mass[i] *
(shape[i][0]*shape[i][0]+shape[i][1]*shape[i][1]);
}
}
| gpl-2.0 |
ArthySundaram/chromeos-kvm | drivers/gpu/arm/t6xx/kbase/src/common/mali_kbase_pm.h | 40979 | /*
*
* (C) COPYRIGHT 2010-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
/**
* @file mali_kbase_pm.h
* Power management API definitions
*/
#ifndef _KBASE_PM_H_
#define _KBASE_PM_H_
#include <kbase/src/common/mali_midg_regmap.h>
#include <asm/atomic.h>
#include "mali_kbase_pm_always_on.h"
#include "mali_kbase_pm_demand.h"
#include "mali_kbase_pm_coarse_demand.h"
/* Frequency that DVFS clock frequency decisions should be made */
#define KBASE_PM_DVFS_FREQUENCY 250
/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
This gives a maximum period between samples of 2^(32+8)/100 ns = slightly under 11s.
Exceeding this will cause overflow */
#define KBASE_PM_TIME_SHIFT 8
/* Forward definition - see mali_kbase.h */
struct kbase_device;
/** List of policy IDs */
typedef enum kbase_pm_policy_id
{
KBASE_PM_POLICY_ID_DEMAND = 1,
KBASE_PM_POLICY_ID_ALWAYS_ON,
KBASE_PM_POLICY_ID_COARSE_DEMAND
} kbase_pm_policy_id;
/** The types of core in a GPU.
*
* These enumerated values are used in calls to @ref kbase_pm_invoke_power_up, @ref kbase_pm_invoke_power_down, @ref
* kbase_pm_get_present_cores, @ref kbase_pm_get_active_cores, @ref kbase_pm_get_trans_cores, @ref
 * kbase_pm_get_ready_cores. They specify which type of core should be acted on.
 * These values are set in a manner that allows the @ref core_type_to_reg function to be simpler and more efficient.
*/
typedef enum kbase_pm_core_type
{
KBASE_PM_CORE_L3 = L3_PRESENT_LO, /**< The L3 cache */
KBASE_PM_CORE_L2 = L2_PRESENT_LO, /**< The L2 cache */
KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO, /**< Shader cores */
KBASE_PM_CORE_TILER = TILER_PRESENT_LO /**< Tiler cores */
} kbase_pm_core_type;
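/* Illustration of the remark above (register names/offsets here are
 * assumptions, not taken from the regmap): because each enumerator equals the
 * address of its xxx_PRESENT_LO register, a core_type_to_reg style helper can
 * in principle reduce to returning core_type plus a per-register-bank offset,
 * e.g. something like GPU_CONTROL_REG(core_type + ready_offset). */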
/** Initialize the power management framework.
*
* Must be called before any other power management function
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return MALI_ERROR_NONE if the power management framework was successfully initialized.
*/
mali_error kbase_pm_init(struct kbase_device *kbdev);
/** Power up GPU after all modules have been initialized and interrupt handlers installed.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return MALI_ERROR_NONE if powerup was successful.
*/
mali_error kbase_pm_powerup(struct kbase_device *kbdev);
/**
* Halt the power management framework.
* Should ensure that no new interrupts are generated,
* but allow any currently running interrupt handlers to complete successfully.
* No event can make the pm system turn on the GPU after this function returns.
* The active policy is sent @ref KBASE_PM_EVENT_SYSTEM_SUSPEND.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_halt(struct kbase_device *kbdev);
/** Terminate the power management framework.
*
* No power management functions may be called after this
* (except @ref kbase_pm_init)
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_term(struct kbase_device *kbdev);
/** Check if the metrics gathering framework is active.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
mali_bool kbasep_pm_metrics_isactive(struct kbase_device *kbdev);
/** Events that can be sent to a power policy.
*
* Power policies are expected to handle all these events, although they may choose to take no action.
*/
typedef enum kbase_pm_event
{
/* helper for tests */
KBASEP_PM_EVENT_FIRST,
/** Initialize the power policy.
*
* This event is sent immediately after the @ref kbase_pm_policy.init function of the policy returns.
*
* The policy may decide to transition the cores to its 'normal' state (e.g. an always on policy would turn all
* the cores on). The policy should assume that the GPU is in active use (i.e. as if the @ref
 * KBASE_PM_EVENT_GPU_ACTIVE event had been received); if this is not the case then @ref KBASE_PM_EVENT_GPU_IDLE
 * will be sent after this event has been handled.
*/
KBASE_PM_EVENT_POLICY_INIT = KBASEP_PM_EVENT_FIRST,
/** The power state of the device has changed.
*
* This event is sent when the GPU raises an interrupt to announce that a power transition has finished. Because
* there may be multiple power transitions the power policy must interrogate the state of the GPU to check whether
* all expected transitions have finished. If the GPU has just turned on or off then the policy must call @ref
* kbase_pm_power_up_done or @ref kbase_pm_power_down_done as appropriate.
*/
KBASE_PM_EVENT_GPU_STATE_CHANGED,
/** The GPU is becoming active.
*
* This event is sent when the first context is about to use the GPU.
*
* If the core is turned off then this event must cause the core to turn on. This is done asynchronously and the
* policy must call the function kbase_pm_power_up_done to signal that the core is turned on sufficiently to allow
* register access.
*/
KBASE_PM_EVENT_GPU_ACTIVE,
/** The GPU is becoming idle.
*
* This event is sent when the last context has finished using the GPU.
*
* The power policy may turn the GPU off entirely (e.g. turn the clocks or power off).
*/
KBASE_PM_EVENT_GPU_IDLE,
/** The system has requested a change of power policy.
*
* The current policy receives this message when a request to change policy occurs. It must ensure that all active
* power transitions are completed and then call the @ref kbase_pm_change_policy function.
*
* This event is only delivered when the policy has been informed that the GPU is 'active' (the power management
* code internally increments the context active counter during a policy change).
*/
KBASE_PM_EVENT_POLICY_CHANGE,
/** The system is requesting to suspend the GPU.
*
* The power policy should ensure that the GPU is shut down sufficiently for the system to suspend the device.
* Once the GPU is ready the policy should call @ref kbase_pm_power_down_done.
*/
KBASE_PM_EVENT_SYSTEM_SUSPEND,
/** The system is requesting to resume the GPU.
*
 * The power policy should restore the GPU to the state it was in before the previous
* @ref KBASE_PM_EVENT_SYSTEM_SUSPEND event. If the GPU is being powered up then it should call
* @ref kbase_pm_power_transitioning before changing the state and @ref kbase_pm_power_up_done when
* the transition is complete.
*/
KBASE_PM_EVENT_SYSTEM_RESUME,
/** The job scheduler is requesting to power up/down cores.
*
* This event is sent when:
* - powered down cores are needed to complete a job
* - powered up cores are not needed anymore
*/
KBASE_PM_EVENT_CHANGE_GPU_STATE,
/* helpers for tests */
KBASEP_PM_EVENT_LAST = KBASE_PM_EVENT_CHANGE_GPU_STATE,
KBASEP_PM_EVENT_INVALID
} kbase_pm_event;
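/*
 * Illustrative sketch (not part of the original header): the rough shape of a
 * policy event handler for the events above. The function name
 * example_policy_event is hypothetical and the body is a simplification; a
 * real policy also has to track individual core state and handle
 * KBASE_PM_EVENT_GPU_STATE_CHANGED / KBASE_PM_EVENT_CHANGE_GPU_STATE.
 */
#if 0
static void example_policy_event(struct kbase_device *kbdev, kbase_pm_event event)
{
	switch (event) {
	case KBASE_PM_EVENT_POLICY_INIT:
	case KBASE_PM_EVENT_GPU_ACTIVE:
	case KBASE_PM_EVENT_SYSTEM_RESUME:
		/* Bring the GPU up and signal that register access is possible */
		kbase_pm_power_transitioning(kbdev);
		kbase_pm_clock_on(kbdev);
		kbase_pm_power_up_done(kbdev);
		break;
	case KBASE_PM_EVENT_GPU_IDLE:
		/* Last context has gone idle - turn the clock off */
		kbase_pm_power_transitioning(kbdev);
		kbase_pm_clock_off(kbdev);
		break;
	case KBASE_PM_EVENT_SYSTEM_SUSPEND:
		/* Shut the GPU down and signal that a suspend may proceed */
		kbase_pm_power_transitioning(kbdev);
		kbase_pm_clock_off(kbdev);
		kbase_pm_power_down_done(kbdev);
		break;
	case KBASE_PM_EVENT_POLICY_CHANGE:
		/* All transitions are already complete in this sketch, so hand over */
		kbase_pm_change_policy(kbdev);
		break;
	default:
		break;
	}
}
#endif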
/** Flags that give information about Power Policies */
enum
{
/** This policy does not power up/down cores and L2/L3 caches individually,
* outside of KBASE_PM_EVENT_GPU_IDLE and KBASE_PM_EVENT_GPU_ACTIVE events.
* That is, the policy guarantees all cores/L2/L3 caches will be powered
* after a KBASE_PM_EVENT_GPU_ACTIVE event.
*
* Hence, it does not need to be sent KBASE_PM_EVENT_CHANGE_GPU_STATE
* events. */
KBASE_PM_POLICY_FLAG_NO_CORE_TRANSITIONS = (1u << 0)
};
typedef u32 kbase_pm_policy_flags;
typedef union kbase_pm_policy_data
{
kbasep_pm_policy_always_on always_on;
kbasep_pm_policy_demand demand;
kbasep_pm_policy_coarse_demand coarse_demand;
} kbase_pm_policy_data;
/** Power policy structure.
*
* Each power management policy exposes a (static) instance of this structure which contains function pointers to the
* policy's methods.
*/
typedef struct kbase_pm_policy
{
/** The name of this policy */
char *name;
/** Function called when the policy is selected
*
	 * This should initialize the policy's member of the kbdev->pm.policy_data union. It should not attempt
* to make any changes to hardware state.
*
* It is undefined what state the cores are in when the function is called, however no power transitions should be
* occurring.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void (*init)(struct kbase_device *kbdev);
/** Function called when the policy is unselected.
*
* This should free any data allocated with \c init
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void (*term)(struct kbase_device *kbdev);
/** Function called when there is an event to process
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param event The event to process
*/
void (*event)(struct kbase_device *kbdev, kbase_pm_event event);
/** Field indicating flags for this policy */
kbase_pm_policy_flags flags;
/** Field indicating an ID for this policy. This is not necessarily the
* same as its index in the list returned by kbase_pm_list_policies().
* It is used purely for debugging. */
kbase_pm_policy_id id;
} kbase_pm_policy;
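/*
 * Illustrative sketch (not part of the original header): how a policy module
 * typically exposes a static instance of this structure from its own source
 * file. All identifiers prefixed with example_ are hypothetical; the flag and
 * ID values are just examples of valid choices.
 */
#if 0
static void example_policy_init(struct kbase_device *kbdev);
static void example_policy_term(struct kbase_device *kbdev);
static void example_policy_event(struct kbase_device *kbdev, kbase_pm_event event);

static const kbase_pm_policy example_policy_ops = {
	"example",                                /* name */
	example_policy_init,                      /* init */
	example_policy_term,                      /* term */
	example_policy_event,                     /* event */
	KBASE_PM_POLICY_FLAG_NO_CORE_TRANSITIONS, /* flags */
	KBASE_PM_POLICY_ID_ALWAYS_ON              /* id */
};
#endif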
/** Metrics data collected for use by the power management framework.
*
*/
typedef struct kbasep_pm_metrics_data
{
	int vsync_hit;			/**< Set via kbase_pm_report_vsync() to indicate whether the vsync target is being hit */
	int utilisation;		/**< Last calculated GPU utilisation, used for DVFS decisions */
	ktime_t time_period_start;	/**< Start time of the current sampling period */
	u32 time_busy;			/**< Accumulated busy time, in units of (1 << KBASE_PM_TIME_SHIFT) ns */
	u32 time_idle;			/**< Accumulated idle time, in units of (1 << KBASE_PM_TIME_SHIFT) ns */
	mali_bool gpu_active;		/**< Whether the GPU is currently recorded as active */
	spinlock_t lock;		/**< Lock protecting this structure */
	struct hrtimer timer;		/**< Timer used to sample the metrics periodically */
	mali_bool timer_active;		/**< Whether the timer is currently running */
	void * platform_data;		/**< Platform-specific data */
	struct kbase_device * kbdev;	/**< Back-pointer to the owning device */
} kbasep_pm_metrics_data;
/** Actions for DVFS.
*
* kbase_pm_get_dvfs_action will return one of these enumerated values to
* describe the action that the DVFS system should take.
*/
typedef enum kbase_pm_dvfs_action
{
KBASE_PM_DVFS_NOP, /**< No change in clock frequency is requested */
KBASE_PM_DVFS_CLOCK_UP, /**< The clock frequency should be increased if possible */
KBASE_PM_DVFS_CLOCK_DOWN /**< The clock frequency should be decreased if possible */
} kbase_pm_dvfs_action;
/** A value for an atomic @ref kbase_pm_device_data::work_active,
* which tracks whether the work unit has been enqueued.
*/
typedef enum kbase_pm_work_active_state
{
KBASE_PM_WORK_ACTIVE_STATE_INACTIVE = 0x00u, /**< There are no work units enqueued and @ref kbase_pm_worker is not running. */
KBASE_PM_WORK_ACTIVE_STATE_ENQUEUED = 0x01u, /**< There is a work unit enqueued, but @ref kbase_pm_worker is not running. */
KBASE_PM_WORK_ACTIVE_STATE_PROCESSING = 0x02u, /**< @ref kbase_pm_worker is running. */
KBASE_PM_WORK_ACTIVE_STATE_PENDING_EVT = 0x03u /**< Processing and there's an event outstanding.
@ref kbase_pm_worker is running, but @ref kbase_pm_device_data::pending_events
has been updated since it started so
it should recheck the list of pending events before exiting. */
} kbase_pm_work_active_state;
/** Data stored per device for power management.
*
* This structure contains data for the power management framework. There is one instance of this structure per device
* in the system.
*/
typedef struct kbase_pm_device_data
{
/** The policy that is currently actively controlling the power state. */
const kbase_pm_policy *current_policy;
/** The policy that the system is transitioning to. */
const kbase_pm_policy *new_policy;
/** The data needed for the current policy. This is considered private to the policy. */
kbase_pm_policy_data policy_data;
/** The workqueue that the policy callbacks are executed on. */
struct workqueue_struct *workqueue;
/** A bit mask of events that are waiting to be delivered to the active policy. */
atomic_t pending_events;
/** The work unit that is enqueued onto the workqueue. */
struct work_struct work;
/** An atomic which tracks whether the work unit has been enqueued.
* For list of possible values please refer to @ref kbase_pm_work_active_state.
*/
atomic_t work_active;
/** Power state and a queue to wait for changes */
#define PM_POWER_STATE_OFF 1
#define PM_POWER_STATE_TRANS 2
#define PM_POWER_STATE_ON 3
int power_state;
wait_queue_head_t power_state_wait;
/** Wait queue for whether the l2 cache has been powered as requested */
wait_queue_head_t l2_powered_wait;
/** State indicating whether all the l2 caches are powered.
* Non-zero indicates they're *all* powered
* Zero indicates that some (or all) are not powered */
int l2_powered;
int no_outstanding_event;
wait_queue_head_t no_outstanding_event_wait;
/** The reference count of active contexts on this device. */
int active_count;
/** Lock to protect active_count */
spinlock_t active_count_lock;
/** The reference count of active gpu cycle counter users */
int gpu_cycle_counter_requests;
/** Lock to protect gpu_cycle_counter_requests */
spinlock_t gpu_cycle_counter_requests_lock;
/** A bit mask identifying the shader cores that the power policy would like to be on.
* The current state of the cores may be different, but there should be transitions in progress that will
	 * eventually achieve this state (assuming that the policy doesn't change its mind in the meantime).
*/
u64 desired_shader_state;
/** bit mask indicating which shader cores are currently in a power-on transition */
u64 powering_on_shader_state;
/** A bit mask identifying the tiler cores that the power policy would like to be on.
	 * @see kbase_pm_device_data::desired_shader_state */
u64 desired_tiler_state;
	/** bit mask indicating which tiler cores are currently in a power-on transition */
u64 powering_on_tiler_state;
/** bit mask indicating which l2-caches are currently in a power-on transition */
u64 powering_on_l2_state;
/** bit mask indicating which l3-caches are currently in a power-on transition */
u64 powering_on_l3_state;
/** Lock protecting the power state of the device.
*
* This lock must be held when accessing the shader_available_bitmap, tiler_available_bitmap, shader_inuse_bitmap
* and tiler_inuse_bitmap fields of kbase_device. It is also held when the hardware power registers are being
* written to, to ensure that two threads do not conflict over the power transitions that the hardware should
* make.
*/
spinlock_t power_change_lock;
/** This flag is set iff the GPU is powered as requested by the desired_xxx_state variables */
atomic_t gpu_in_desired_state;
/** Set to true when the GPU is powered and register accesses are possible, false otherwise */
mali_bool gpu_powered;
/** Spinlock that must be held when writing gpu_powered */
spinlock_t gpu_powered_lock;
/** Structure to hold metrics for the GPU */
kbasep_pm_metrics_data metrics;
/** Callback when the GPU needs to be turned on. See @ref kbase_pm_callback_conf
*
* @param kbdev The kbase device
*
* @return 1 if GPU state was lost, 0 otherwise
*/
int (*callback_power_on)(struct kbase_device *kbdev);
/** Callback when the GPU may be turned off. See @ref kbase_pm_callback_conf
*
* @param kbdev The kbase device
*/
void (*callback_power_off)(struct kbase_device *kbdev);
/** Callback for initializing the runtime power management.
*
* @param kbdev The kbase device
*
* @return MALI_ERROR_NONE on success, else error code
*/
mali_error (*callback_power_runtime_init)(struct kbase_device *kbdev);
/** Callback for terminating the runtime power management.
*
* @param kbdev The kbase device
*/
void (*callback_power_runtime_term)(struct kbase_device *kbdev);
/** Callback when the GPU needs to be turned on. See @ref kbase_pm_callback_conf
*
* @param kbdev The kbase device
*
* @return 1 if GPU state was lost, 0 otherwise
*/
int (*callback_power_runtime_on)(struct kbase_device *kbdev);
/** Callback when the GPU may be turned off. See @ref kbase_pm_callback_conf
*
* @param kbdev The kbase device
*/
void (*callback_power_runtime_off)(struct kbase_device *kbdev);
} kbase_pm_device_data;
/** Get the current policy.
* Returns the policy that is currently active.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return The current policy
*/
const kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
/** Change the policy to the one specified.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param policy The policy to change to (valid pointer returned from @ref kbase_pm_list_policies)
*/
void kbase_pm_set_policy(struct kbase_device *kbdev, const kbase_pm_policy *policy);
/** Retrieve a static list of the available policies.
* @param[out] policies An array pointer to take the list of policies. This may be NULL.
* The contents of this array must not be modified.
*
* @return The number of policies
*/
int kbase_pm_list_policies(const kbase_pm_policy * const **policies);
/** The current policy is ready to change to the new policy
*
* The current policy must ensure that all cores have finished transitioning before calling this function.
* The new policy is sent an @ref KBASE_PM_EVENT_POLICY_INIT event.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_change_policy(struct kbase_device *kbdev);
/** The GPU is idle.
*
* The OS may choose to turn off idle devices
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_dev_idle(struct kbase_device *kbdev);
/** The GPU is active.
*
* The OS should avoid opportunistically turning off the GPU while it is active
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_dev_activate(struct kbase_device *kbdev);
/** Send an event to the active power policy.
*
* The event is queued for sending to the active power policy. The event is merged with the current queue by the @ref
* kbasep_pm_merge_event function which may decide to drop events.
*
* Note that this function may be called in an atomic context on Linux which implies that it must not sleep.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param event The event that should be queued
*/
void kbase_pm_send_event(struct kbase_device *kbdev, kbase_pm_event event);
/** Turn one or more cores on.
*
* This function is called by the active power policy to turn one or more cores on.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
* @param cores A bitmask of cores to turn on
*/
void kbase_pm_invoke_power_up(struct kbase_device *kbdev, kbase_pm_core_type type, u64 cores);
/** Turn one or more cores off.
*
* This function is called by the active power policy to turn one or more core off.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
* @param cores A bitmask of cores to turn off
*/
void kbase_pm_invoke_power_down(struct kbase_device *kbdev, kbase_pm_core_type type, u64 cores);
/** Get details of the cores that are present in the device.
*
* This function can be called by the active power policy to return a bitmask of the cores (of a specified type)
* present in the GPU device and also a count of the number of cores.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
*
* @return The bit mask of cores present
*/
u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
/** Get details of the cores that are currently active in the device.
*
* This function can be called by the active power policy to return a bitmask of the cores (of a specified type) that
* are actively processing work (i.e. turned on *and* busy).
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
*
* @return The bit mask of active cores
*/
u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
/** Get details of the cores that are currently transitioning between power states.
*
* This function can be called by the active power policy to return a bitmask of the cores (of a specified type) that
* are currently transitioning between power states.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
*
* @return The bit mask of transitioning cores
*/
u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
/** Get details of the cores that are currently powered and ready for jobs.
*
* This function can be called by the active power policy to return a bitmask of the cores (of a specified type) that
* are powered and ready for jobs (they may or may not be currently executing jobs).
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param type The type of core (see the @ref kbase_pm_core_type enumeration)
*
* @return The bit mask of ready cores
*/
u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
/** Return whether the power manager is active
*
 * This function will return true when there are cores (of any type) that are currently transitioning between power
* states.
*
* It can be used on receipt of the @ref KBASE_PM_EVENT_GPU_STATE_CHANGED message to determine whether the requested power
* transitions have completely finished or not.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return true when there are cores transitioning between power states, false otherwise
*/
mali_bool kbase_pm_get_pwr_active(struct kbase_device *kbdev);
/** Turn the clock for the device on, and enable device interrupts.
*
* This function can be used by a power policy to turn the clock for the GPU on. It should be modified during
* integration to perform the necessary actions to ensure that the GPU is fully powered and clocked.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_clock_on(struct kbase_device *kbdev);
/** Disable device interrupts, and turn the clock for the device off.
*
* This function can be used by a power policy to turn the clock for the GPU off. It should be modified during
* integration to perform the necessary actions to turn the clock off (if this is possible in the integration).
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_clock_off(struct kbase_device *kbdev);
/** Enable interrupts on the device.
*
* Interrupts are also enabled after a call to kbase_pm_clock_on().
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
/** Disable interrupts on the device.
*
* This prevents interrupt delivery to the CPU so no further @ref KBASE_PM_EVENT_GPU_STATE_CHANGED messages will be
* received until @ref kbase_pm_enable_interrupts or kbase_pm_clock_on() is called.
*
* Interrupts are also disabled after a call to kbase_pm_clock_off().
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
/** Initialize the hardware
*
* This function checks the GPU ID register to ensure that the GPU is supported by the driver and performs a reset on
* the device so that it is in a known state before the device is used.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return MALI_ERROR_NONE if the device is supported and successfully reset.
*/
mali_error kbase_pm_init_hw(struct kbase_device *kbdev);
/** Inform the power management system that the power state of the device is transitioning.
*
* This function must be called by the active power policy before transitioning the core between an 'off state' and an
* 'on state'. It resets the wait queues that are waited on by @ref kbase_pm_wait_for_power_up and @ref
* kbase_pm_wait_for_power_down.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_power_transitioning(struct kbase_device *kbdev);
/** The GPU has been powered up successfully.
*
* This function must be called by the active power policy when the GPU has been powered up successfully. It signals
* to the rest of the system that jobs can start being submitted to the device.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_power_up_done(struct kbase_device *kbdev);
/** The GPU has been reset successfully.
*
* This function must be called by the GPU interrupt handler when the RESET_COMPLETED bit is set. It signals to the
* power management initialization code that the GPU has been successfully reset.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_reset_done(struct kbase_device *kbdev);
/** The GPU has been powered down successfully.
*
* This function must be called by the active power policy when the GPU has been powered down successfully. It signals
* to the rest of the system that a system suspend can now take place.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_power_down_done(struct kbase_device *kbdev);
/** Wait for the power policy to signal power up.
*
 * This function waits for the power policy to signal power up by calling @ref kbase_pm_power_up_done. Once the power
 * policy has signalled this, subsequent calls return immediately until the power policy calls @ref
 * kbase_pm_power_transitioning again.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_wait_for_power_up(struct kbase_device *kbdev);
/** Wait for the power policy to signal power down.
*
 * This function waits for the power policy to signal power down by calling @ref kbase_pm_power_down_done. Once the
 * power policy has signalled this, subsequent calls return immediately until the power policy calls @ref
 * kbase_pm_power_transitioning again.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_wait_for_power_down(struct kbase_device *kbdev);
/** Increment the count of active contexts.
*
* This function should be called when a context is about to submit a job. It informs the active power policy that the
* GPU is going to be in use shortly and the policy is expected to start turning on the GPU.
*
* This function will block until the GPU is available.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_context_active(struct kbase_device *kbdev);
/** Decrement the reference count of active contexts.
*
* This function should be called when a context becomes idle. After this call the GPU may be turned off by the power
* policy so the calling code should ensure that it does not access the GPU's registers.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_context_idle(struct kbase_device *kbdev);
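/*
 * Illustrative sketch (not part of the original header): bracketing a burst of
 * GPU register access with the active/idle reference count. The function name
 * example_touch_gpu is hypothetical.
 */
#if 0
static void example_touch_gpu(struct kbase_device *kbdev)
{
	/* Blocks until the GPU is powered and ready for register access */
	kbase_pm_context_active(kbdev);

	/* ... submit work / access registers here ... */

	/* Drop the reference; the policy may now power the GPU down */
	kbase_pm_context_idle(kbdev);
}
#endif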
/** Check if there are any power transitions to make, and if so start them.
*
* This function will check the desired_xx_state members of kbase_pm_device_data and the actual status of the
* hardware to see if any power transitions can be made at this time to make the hardware state closer to the state
* desired by the power policy.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_check_transitions(struct kbase_device *kbdev);
/** Read the bitmasks of present cores.
*
* This information is cached to avoid having to perform register reads whenever the information is required.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_read_present_cores(struct kbase_device *kbdev);
/** Mark one or more cores as being required for jobs to be submitted.
*
 * This function is called by the job scheduler to mark cores of one or both types (shader and/or tiler)
 * as being required to submit jobs that are ready to run.
*
* The cores requested are reference counted and a subsequent call to @ref kbase_pm_register_inuse_cores or
* @ref kbase_pm_unrequest_cores should be made to dereference the cores as being 'needed'.
*
 * The currently running policy is sent a @ref KBASE_PM_EVENT_CHANGE_GPU_STATE event if power up of the requested
 * cores is required.
* The policy is expected to make these cores available at some point in the future,
* but may take an arbitrary length of time to reach this state.
*
* @param kbdev The kbase device structure for the device
* @param shader_cores A bitmask of shader cores which are necessary for the job
* @param tiler_cores A bitmask of tiler cores which are necessary for the job
*
* @return MALI_ERROR_NONE if the cores were successfully requested.
*/
mali_error kbase_pm_request_cores(struct kbase_device *kbdev, u64 shader_cores, u64 tiler_cores);
/** Unmark one or more cores as being required for jobs to be submitted.
*
* This function undoes the effect of @ref kbase_pm_request_cores. It should be used when a job is not
* going to be submitted to the hardware (e.g. the job is cancelled before it is enqueued).
*
 * The currently running policy is sent a @ref KBASE_PM_EVENT_CHANGE_GPU_STATE event if power down of the requested
 * cores is required.
*
* The policy may use this as an indication that it can power down cores.
*
* @param kbdev The kbase device structure for the device
* @param shader_cores A bitmask of shader cores (as given to @ref kbase_pm_request_cores)
* @param tiler_cores A bitmask of tiler cores (as given to @ref kbase_pm_request_cores)
*/
void kbase_pm_unrequest_cores(struct kbase_device *kbdev, u64 shader_cores, u64 tiler_cores);
/** Register a set of cores as in use by a job.
*
* This function should be called after @ref kbase_pm_request_cores when the job is about to be submitted to
* the hardware. It will check that the necessary cores are available and if so update the 'needed' and 'inuse'
* bitmasks to reflect that the job is now committed to being run.
*
* If the necessary cores are not currently available then the function will return MALI_FALSE and have no effect.
*
* @param kbdev The kbase device structure for the device
* @param shader_cores A bitmask of shader cores (as given to @ref kbase_pm_request_cores)
* @param tiler_cores A bitmask of tiler cores (as given to @ref kbase_pm_request_cores)
*
* @return MALI_TRUE if the job can be submitted to the hardware or MALI_FALSE if the job is not ready to run.
*/
mali_bool kbase_pm_register_inuse_cores(struct kbase_device *kbdev, u64 shader_cores, u64 tiler_cores);
/** Release cores after a job has run.
*
* This function should be called when a job has finished running on the hardware. A call to @ref
* kbase_pm_register_inuse_cores must have previously occurred. The reference counts of the specified cores will be
* decremented which may cause the bitmask of 'inuse' cores to be reduced. The power policy may then turn off any
* cores which are no longer 'inuse'.
*
* @param kbdev The kbase device structure for the device
* @param shader_cores A bitmask of shader cores (as given to @ref kbase_pm_register_inuse_cores)
* @param tiler_cores A bitmask of tiler cores (as given to @ref kbase_pm_register_inuse_cores)
*/
void kbase_pm_release_cores(struct kbase_device *kbdev, u64 shader_cores, u64 tiler_cores);
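/*
 * Illustrative sketch (not part of the original header): the documented
 * request -> register_inuse -> release life cycle for the cores a job needs.
 * The bitmask values and the retry strategy are hypothetical; the real job
 * scheduler drives this from its own state machine.
 */
#if 0
static void example_run_job(struct kbase_device *kbdev)
{
	u64 shaders = 0x3;	/* hypothetical: shader cores 0 and 1 */
	u64 tilers  = 0x1;	/* hypothetical: tiler core 0 */

	if (kbase_pm_request_cores(kbdev, shaders, tilers) != MALI_ERROR_NONE)
		return;

	if (!kbase_pm_register_inuse_cores(kbdev, shaders, tilers)) {
		/* Cores not powered yet - give them back and retry later */
		kbase_pm_unrequest_cores(kbdev, shaders, tilers);
		return;
	}

	/* ... submit the job and wait for it to complete ... */

	/* Job finished: drop the 'inuse' references */
	kbase_pm_release_cores(kbdev, shaders, tilers);
}
#endif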
/** Initialize the metrics gathering framework.
*
* This must be called before other metric gathering APIs are called.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED on error
*/
mali_error kbasep_pm_metrics_init(struct kbase_device *kbdev);
/** Terminate the metrics gathering framework.
*
* This must be called when metric gathering is no longer required. It is an error to call any metrics gathering
* function (other than kbasep_pm_metrics_init) after calling this function.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_metrics_term(struct kbase_device *kbdev);
/** Record that the GPU is active.
*
 * This records that the GPU is now active. The previous GPU state must have been idle; in a debug build the function
 * will assert if this is not the case.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_record_gpu_active(struct kbase_device *kbdev);
/** Record that the GPU is idle.
*
 * This records that the GPU is now idle. The previous GPU state must have been active; in a debug build the function
 * will assert if this is not the case.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbasep_pm_record_gpu_idle(struct kbase_device *kbdev);
/** Function to be called by the frame buffer driver to update the vsync metric.
*
* This function should be called by the frame buffer driver to update whether the system is hitting the vsync target
* or not. buffer_updated should be true if the vsync corresponded with a new frame being displayed, otherwise it
* should be false. This function does not need to be called every vsync, but only when the value of buffer_updated
 * differs from the previous call.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
* @param buffer_updated True if the buffer has been updated on this VSync, false otherwise
*/
void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
/** Configure the frame buffer device to set the vsync callback.
*
* This function should do whatever is necessary for this integration to ensure that kbase_pm_report_vsync is
* called appropriately.
*
* This function will need porting as part of the integration for a device.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_register_vsync_callback(struct kbase_device *kbdev);
/** Free any resources that kbase_pm_register_vsync_callback allocated.
*
* This function should perform any cleanup required from the call to kbase_pm_register_vsync_callback.
* No call backs should occur after this function has returned.
*
* This function will need porting as part of the integration for a device.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_unregister_vsync_callback(struct kbase_device *kbdev);
/** Determine whether the DVFS system should change the clock speed of the GPU.
*
* This function should be called regularly by the DVFS system to check whether the clock speed of the GPU needs
* updating. It will return one of three enumerated values of kbase_pm_dvfs_action:
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
 * @retval KBASE_PM_DVFS_NOP The clock does not need changing.
 * @retval KBASE_PM_DVFS_CLOCK_UP The clock frequency should be increased if possible.
 * @retval KBASE_PM_DVFS_CLOCK_DOWN The clock frequency should be decreased if possible.
*/
kbase_pm_dvfs_action kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
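/*
 * Illustrative sketch (not part of the original header): how a platform DVFS
 * handler might act on the value returned above. example_clock_up/_down are
 * hypothetical platform hooks, not part of this API.
 */
#if 0
static void example_clock_up(void);	/* hypothetical platform hook */
static void example_clock_down(void);	/* hypothetical platform hook */

static void example_dvfs_tick(struct kbase_device *kbdev)
{
	switch (kbase_pm_get_dvfs_action(kbdev)) {
	case KBASE_PM_DVFS_CLOCK_UP:
		example_clock_up();
		break;
	case KBASE_PM_DVFS_CLOCK_DOWN:
		example_clock_down();
		break;
	case KBASE_PM_DVFS_NOP:
	default:
		break;
	}
}
#endif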
/** Mark that the GPU cycle counter is needed; if the caller is the first caller
* then the GPU cycle counters will be enabled.
*
* The GPU must be powered when calling this function (i.e. @ref kbase_pm_context_active must have been called).
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
/** Mark that the GPU cycle counter is no longer in use; if the caller is the last
 * caller then the GPU cycle counters will be disabled. A request must have been made
 * before calling this function.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
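/*
 * Illustrative sketch (not part of the original header): enabling the GPU
 * cycle counters around a profiling section. The GPU must already be powered,
 * hence the surrounding context_active/idle pair; the function name is
 * hypothetical.
 */
#if 0
static void example_profile(struct kbase_device *kbdev)
{
	kbase_pm_context_active(kbdev);
	kbase_pm_request_gpu_cycle_counter(kbdev);

	/* ... read the cycle counter registers here ... */

	kbase_pm_release_gpu_cycle_counter(kbdev);
	kbase_pm_context_idle(kbdev);
}
#endif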
/** Enables access to the GPU registers before power management has powered up the GPU
* with kbase_pm_powerup().
*
* Access to registers should be done using kbase_os_reg_read/write() at this stage,
* not kbase_reg_read/write().
*
* This results in the power management callbacks provided in the driver configuration
 * being called to turn on power and/or clocks to the GPU.
* See @ref kbase_pm_callback_conf.
*
* This should only be used before power management is powered up with kbase_pm_powerup()
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_register_access_enable(struct kbase_device *kbdev);
/** Disables access to the GPU registers enabled earlier by a call to
* kbase_pm_register_access_enable().
*
* This results in the power management callbacks provided in the driver configuration
 * being called to turn off power and/or clocks to the GPU.
 * See @ref kbase_pm_callback_conf.
*
* This should only be used before power management is powered up with kbase_pm_powerup()
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_register_access_disable(struct kbase_device *kbdev);
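/*
 * Illustrative sketch (not part of the original header): probing the GPU
 * before kbase_pm_powerup(), bracketed by the register-access enable/disable
 * calls above. The function name is hypothetical and the actual register
 * accesses are elided because kbase_os_reg_read()/write() usage is
 * integration specific.
 */
#if 0
static void example_early_probe(struct kbase_device *kbdev)
{
	kbase_pm_register_access_enable(kbdev);

	/* ... use kbase_os_reg_read()/kbase_os_reg_write() here ... */

	kbase_pm_register_access_disable(kbdev);
}
#endif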
/** Request the use of l2 caches for all core groups: power them up, wait for them to be powered, and prevent
 * the power manager from powering the l2 caches down.
*
* This tells the power management that the caches should be powered up, and they
* should remain powered, irrespective of the usage of tiler and shader cores. This does not
* return until the l2 caches are powered up.
*
* The caller must call @ref kbase_pm_release_l2_caches when they are finished to
* allow normal power management of the l2 caches to resume.
*
* This should only be used when power management is active.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
/** Release the use of l2 caches for all core groups and allow the power manager to
* power them down when necessary.
*
* This tells the power management that the caches can be powered down if necessary, with respect
* to the usage of tiler and shader cores.
*
* The caller must have called @ref kbase_pm_request_l2_caches prior to a call to this.
*
* This should only be used when power management is active.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*/
void kbase_pm_release_l2_caches(struct kbase_device *kbdev);
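/*
 * Illustrative sketch (not part of the original header): holding the l2
 * caches powered across an operation that needs them regardless of
 * shader/tiler usage. The function name is hypothetical.
 */
#if 0
static void example_needs_l2(struct kbase_device *kbdev)
{
	/* Does not return until the l2 caches are powered */
	kbase_pm_request_l2_caches(kbdev);

	/* ... perform the work that requires the l2 caches ... */

	/* Allow normal power management of the l2 caches to resume */
	kbase_pm_release_l2_caches(kbdev);
}
#endif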
/** Queue mali_dvfs_work which performs GPU voltage/frequency scaling in mali_dvfs_event_proc
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return MALI_TRUE on success (currently no error handling present)
*/
int kbase_platform_dvfs_event(struct kbase_device *kbdev);
/** Get the GPU utilisation. Used to determine if the frequency/voltage needs to be scaled.
*
* @param kbdev The kbase device structure for the device (must be a valid pointer)
*
* @return The current GPU utilisation
*/
int kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev);
#endif /* _KBASE_PM_H_ */
| gpl-2.0 |
jvasileff/ceylon-compiler | test/src/com/redhat/ceylon/compiler/java/test/TestModule.java | 2146 | /*
* Copyright Red Hat Inc. and/or its affiliates and other contributors
* as indicated by the authors tag. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*
* This particular file is subject to the "Classpath" exception as provided in the
* LICENSE file that accompanied this code.
*
* This program is distributed in the hope that it will be useful, but WITHOUT A
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License,
* along with this distribution; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
package com.redhat.ceylon.compiler.java.test;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation used with {@link org.junit.runner.RunWith @RunWith(CeylonModuleRunner.class)}.
* @author tom
*/
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
public @interface TestModule {
/** The directory where the source code is to be found */
String srcDirectory();
/** The directory where the resources are to be found */
String resDirectory();
/** Whether the test should fail if there are no tests to run */
boolean errorIfNoTests() default true;
String[] options() default {};
/** The name of the modules to compile */
String[] modules() default {};
String[] dependencies() default {};
Class<? extends CeylonModuleRunner.TestLoader> testLoader() default CeylonModuleRunner.StandardLoader.class;
ModuleSpecifier[] runModulesInNewJvm() default {};
String[] removeAtRuntime() default {};
String[] modulesUsingCheckModule() default {};
String[] modulesUsingCheckFunction() default {};
}
| gpl-2.0 |
braz/OStrichSlurm | src/squeue/print.h | 29512 | /*****************************************************************************\
* print.h - squeue print job definitions
*****************************************************************************
* Copyright (C) 2002-2007 The Regents of the University of California.
* Copyright (C) 2008-2010 Lawrence Livermore National Security
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Joey Ekstrom <[email protected]>
* CODE-OCEC-09-009. All rights reserved.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://slurm.schedmd.com/>.
* Please also read the included file: DISCLAIMER.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* In addition, as a special exception, the copyright holders give permission
* to link the code of portions of this program with the OpenSSL library under
* certain conditions as described in each individual source file, and
* distribute linked combinations including the two. You must obey the GNU
* General Public License in all respects for all of the code used other than
* OpenSSL. If you modify file(s) with this exception, you may extend this
* exception to your version of the file(s), but you are not obligated to do
* so. If you do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source files in
* the program, then also delete it here.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
\*****************************************************************************/
#ifndef _SQUEUE_PRINT_H_
#define _SQUEUE_PRINT_H_
#include "slurm/slurm.h"
#include "src/common/list.h"
#define FORMAT_STRING_SIZE 32
/*****************************************************************************
* Format Structures
*****************************************************************************/
typedef struct job_format {
int (*function) (job_info_t *, int, bool, char*);
uint32_t width;
bool right_justify;
char *suffix;
} job_format_t;
typedef struct step_format {
int (*function) (job_step_info_t *, int, bool, char*);
uint32_t width;
bool right_justify;
char *suffix;
} step_format_t;
typedef struct squeue_job_rec {
	job_info_t * job_ptr;	/* the job record being printed */
	char * part_name;	/* name of the partition this record is reported under */
	uint32_t part_prio;	/* priority of that partition */
} squeue_job_rec_t;
long job_time_used(job_info_t * job_ptr);
int print_jobs_list(List jobs, List format);
int print_steps_list(List steps, List format);
int print_jobs_array(job_info_t * jobs, int size, List format);
int print_steps_array(job_step_info_t * steps, int size, List format);
int print_job_from_format(squeue_job_rec_t * job_rec_ptr, List list);
int print_step_from_format(job_step_info_t * job_step, List list);
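/*
 * Illustrative sketch (not part of the original header): building a format
 * list with the macros below and printing a job table with it. The field
 * widths, the job_info_msg_t source of the array, and the use of
 * list_create()/list_destroy() are assumptions for illustration; squeue
 * itself builds a list like this while parsing its --format string.
 */
#if 0
static void example_print_jobs(job_info_msg_t *msg)
{
	List format = list_create(NULL);

	/* job id, right justified in 18 characters, followed by a space */
	job_format_add_job_id(format, 18, true, " ");
	job_format_add_partition(format, 9, false, " ");
	job_format_add_user_name(format, 8, false, " ");
	job_format_add_job_state_compact(format, 2, false, " ");
	job_format_add_time_used(format, 10, true, " ");
	job_format_add_nodes(format, 0, false, "");

	print_jobs_array(msg->job_array, msg->record_count, format);
	list_destroy(format);
}
#endif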
/*****************************************************************************
* Job Line Format Options
*****************************************************************************/
int job_format_add_function(List list, int width, bool right_justify,
char *suffix,
int (*function) (job_info_t *, int, bool, char*));
#define job_format_add_array_job_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_array_job_id)
#define job_format_add_array_task_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_array_task_id)
#define job_format_add_batch_host(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_batch_host)
#define job_format_add_core_spec(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_core_spec)
#define job_format_add_job_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_job_id)
#define job_format_add_job_id2(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_job_id2)
#define job_format_add_partition(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_partition)
#define job_format_add_prefix(list,wid,right,prefix) \
job_format_add_function(list,0,0,prefix,_print_job_prefix)
#define job_format_add_reason(list,wid,right,prefix) \
job_format_add_function(list,wid,right,prefix,_print_job_reason)
#define job_format_add_reason_list(list,wid,right,prefix) \
job_format_add_function(list,wid,right,prefix,_print_job_reason_list)
#define job_format_add_name(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_name)
#define job_format_add_licenses(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_licenses)
#define job_format_add_wckey(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_wckey)
#define job_format_add_user_name(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_user_name)
#define job_format_add_user_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_user_id)
#define job_format_add_gres(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_gres)
#define job_format_add_group_name(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_group_name)
#define job_format_add_group_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_group_id)
#define job_format_add_job_state(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_job_state)
#define job_format_add_job_state_compact(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_job_state_compact)
#define job_format_add_time_left(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_time_left)
#define job_format_add_time_limit(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_time_limit)
#define job_format_add_time_used(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_time_used)
#define job_format_add_time_submit(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_time_submit)
#define job_format_add_time_start(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_time_start)
#define job_format_add_time_end(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_time_end)
#define job_format_add_priority(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_priority)
#define job_format_add_priority_long(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_priority_long)
#define job_format_add_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_nodes)
#define job_format_add_schednodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_schednodes)
#define job_format_add_node_inx(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_node_inx)
#define job_format_add_num_cpus(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_num_cpus)
#define job_format_add_num_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_num_nodes)
#define job_format_add_num_sct(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_num_sct)
#define job_format_add_shared(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_shared)
#define job_format_add_contiguous(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_contiguous)
#define job_format_add_min_cpus(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_pn_min_cpus)
#define job_format_add_sockets(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_sockets)
#define job_format_add_cores(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_cores)
#define job_format_add_threads(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_threads)
#define job_format_add_min_memory(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_pn_min_memory)
#define job_format_add_min_tmp_disk(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_pn_min_tmp_disk)
#define job_format_add_req_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_req_nodes)
#define job_format_add_exc_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_exc_nodes)
#define job_format_add_req_node_inx(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_req_node_inx)
#define job_format_add_exc_node_inx(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_exc_node_inx)
#define job_format_add_features(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_features)
#define job_format_add_account(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_account)
#define job_format_add_dependency(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_dependency)
#define job_format_add_qos(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_qos)
#define job_format_add_select_jobinfo(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_select_jobinfo)
#define job_format_add_comment(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_comment)
#define job_format_add_reservation(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_reservation)
#define job_format_add_command(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_command)
#define job_format_add_work_dir(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_work_dir)
#define job_format_add_invalid(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,(void*)_print_com_invalid)
#define job_format_add_nice(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_nice)
#define job_format_add_alloc_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_alloc_nodes)
#define job_format_add_alloc_sid(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_alloc_sid)
#define job_format_add_assoc_id(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_assoc_id)
#define job_format_add_batch_flag(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_batch_flag)
#define job_format_add_boards_per_node(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_boards_per_node)
#define job_format_add_cpus_per_task(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_cpus_per_task)
#define job_format_add_derived_ec(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_derived_ec)
#define job_format_add_eligible_time(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_eligible_time)
#define job_format_add_exit_code(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_exit_code)
#define job_format_add_max_cpus(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_max_cpus)
#define job_format_add_max_nodes(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_max_nodes)
#define job_format_add_network(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_network)
#define job_format_add_ntasks_per_core(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_ntasks_per_core)
#define job_format_add_ntasks_per_node(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_ntasks_per_node)
#define job_format_add_ntasks_per_socket(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_ntasks_per_socket)
#define job_format_add_ntasks_per_board(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_ntasks_per_board)
#define job_format_add_preempt_time(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_preempt_time)
#define job_format_add_profile(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_profile)
#define job_format_add_reboot(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_reboot)
#define job_format_add_req_switch(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_req_switch)
#define job_format_add_requeue(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_requeue)
#define job_format_add_resize_time(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_resize_time)
#define job_format_add_restart_cnt(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_restart_cnt)
#define job_format_add_sockets_per_board(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix, \
_print_job_sockets_per_board)
#define job_format_add_std_err(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_std_err)
#define job_format_add_std_in(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_std_in)
#define job_format_add_std_out(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_std_out)
#define job_format_add_min_time(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_min_time)
#define job_format_add_wait4switch(list,wid,right,suffix) \
job_format_add_function(list,wid,right,suffix,_print_job_wait4switch)
/*****************************************************************************
* Job Line Print Functions
*****************************************************************************/
int _print_job_array_job_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_array_task_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_batch_host(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_core_spec(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_job_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_job_id2(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_prefix(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_reason(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_reason_list(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_name(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_licenses(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_wckey(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_user_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_user_name(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_gres(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_group_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_group_name(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_job_state(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_job_state_compact(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_time_left(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_time_limit(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_time_used(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_time_submit(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_time_start(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_time_end(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_priority(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_priority_long(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_schednodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_node_inx(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_partition(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_num_cpus(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_num_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_num_sct(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_shared(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_contiguous(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_pn_min_cpus(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_sockets(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_cores(job_info_t * job, int width, bool right_justify, char* suffix);
int _print_threads(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_pn_min_memory(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_pn_min_tmp_disk(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_req_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_exc_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_req_node_inx(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_exc_node_inx(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_features(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_account(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_dependency(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_qos(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_select_jobinfo(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_comment(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_reservation(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_command(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_work_dir(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_nice(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_alloc_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_alloc_sid(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_assoc_id(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_batch_flag(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_boards_per_node(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_cpus_per_task(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_derived_ec(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_eligible_time(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_exit_code(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_max_cpus(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_max_nodes(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_network(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_ntasks_per_core(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_ntasks_per_node(job_info_t * job, int width, bool right_justify,
char* suffix);
int _print_job_ntasks_per_socket(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_ntasks_per_board(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_preempt_time(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_profile(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_reboot(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_req_switch(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_requeue(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_resize_time(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_restart_cnt(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_sockets_per_board(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_std_err(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_std_in(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_std_out(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_min_time(job_info_t * job, int width,
bool right_justify, char* suffix);
int _print_job_wait4switch(job_info_t * job, int width,
bool right_justify, char* suffix);
/*****************************************************************************
* Step Print Format Functions
*****************************************************************************/
int step_format_add_function(List list, int width, bool right_justify,
char * suffix,
int (*function) (job_step_info_t *, int, bool, char *));
#define step_format_add_id(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_id)
#define step_format_add_partition(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_partition)
#define step_format_add_prefix(list,wid,right,prefix) \
step_format_add_function(list,0,0,prefix,_print_step_prefix)
#define step_format_add_user_id(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_user_id)
#define step_format_add_user_name(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_user_name)
#define step_format_add_time_limit(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_time_limit)
#define step_format_add_time_start(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_time_start)
#define step_format_add_time_used(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_time_used)
#define step_format_add_nodes(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_nodes)
#define step_format_add_name(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_name)
#define step_format_add_num_tasks(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_num_tasks)
#define step_format_add_gres(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_gres)
#define step_format_add_invalid(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix, \
(void*)_print_com_invalid)
#define step_format_add_array_job_id(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_array_job_id)
#define step_format_add_array_task_id(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix, \
_print_step_array_task_id)
#define step_format_add_chpt_dir(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_chpt_dir)
#define step_format_add_chpt_interval(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix, \
_print_step_chpt_interval)
#define step_format_add_job_id(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_job_id)
#define step_format_add_network(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_network)
#define step_format_add_node_inx(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_node_inx)
#define step_format_add_num_cpus(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_num_cpus)
#define step_format_add_cpu_freq(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_cpu_freq)
#define step_format_add_resv_ports(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_resv_ports)
#define step_format_add_step_state(list,wid,right,suffix) \
step_format_add_function(list,wid,right,suffix,_print_step_state)
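/*
 * Illustrative use of the step_format_add_* macros above (not part of the
 * original header; the list variable and widths are hypothetical). Callers
 * build a List of column descriptors, one entry per output field, and the
 * print code later walks that list invoking each stored callback with its
 * width, right-justify flag and suffix:
 *
 *	step_format_add_id(step_format_list, 10, false, "");
 *	step_format_add_user_name(step_format_list, 8, false, " ");
 *	step_format_add_time_used(step_format_list, 10, true, "");
 */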
// finish adding macros and function headers in the .h file.
/*****************************************************************************
* Step Line Print Functions
*****************************************************************************/
int _print_step_id(job_step_info_t * step, int width, bool right_justify,
char *suffix);
int _print_step_partition(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_prefix(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_user_id(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_user_name(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_time_limit(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_time_start(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_time_used(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_name(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_nodes(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_num_tasks(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_gres(job_step_info_t * step, int width,
bool right_justify, char *suffix);
int _print_step_array_job_id(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_array_task_id(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_chpt_dir(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_chpt_interval(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_job_id(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_network(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_node_inx(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_num_cpus(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_cpu_freq(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_resv_ports(job_step_info_t * step, int width, bool right,
char* suffix);
int _print_step_state(job_step_info_t * step, int width, bool right,
char* suffix);
/*****************************************************************************
* Common Line Print Functions
*****************************************************************************/
int _print_com_invalid(void * p, int width, bool right_justify, char * suffix);
#endif
| gpl-2.0 |
egbot/Symbiota | vendor/phpoffice/phpspreadsheet/src/PhpSpreadsheet/Calculation/TextData/Replace.php | 2182 | <?php
namespace PhpOffice\PhpSpreadsheet\Calculation\TextData;
use PhpOffice\PhpSpreadsheet\Calculation\Functions;
class Replace
{
/**
* REPLACE.
*
* @param mixed $oldText The text string value to modify
* @param mixed $start Integer offset for start character of the replacement
* @param mixed $chars Integer number of characters to replace from the start offset
* @param mixed $newText String to replace in the defined position
*/
public static function replace($oldText, $start, $chars, $newText): string
{
$oldText = Functions::flattenSingleValue($oldText);
$start = Functions::flattenSingleValue($start);
$chars = Functions::flattenSingleValue($chars);
$newText = Functions::flattenSingleValue($newText);
$left = Extract::left($oldText, $start - 1);
$right = Extract::right($oldText, Text::length($oldText) - ($start + $chars) + 1);
return $left . $newText . $right;
}
/**
* SUBSTITUTE.
*
* @param mixed $text The text string value to modify
* @param mixed $fromText The string value that we want to replace in $text
* @param mixed $toText The string value that we want to replace with in $text
 * @param mixed $instance Integer instance number of the occurrence of $fromText to change
*/
public static function substitute($text = '', $fromText = '', $toText = '', $instance = 0): string
{
$text = Functions::flattenSingleValue($text);
$fromText = Functions::flattenSingleValue($fromText);
$toText = Functions::flattenSingleValue($toText);
$instance = floor(Functions::flattenSingleValue($instance));
if ($instance == 0) {
return str_replace($fromText, $toText, $text);
}
$pos = -1;
while ($instance > 0) {
$pos = mb_strpos($text, $fromText, $pos + 1, 'UTF-8');
if ($pos === false) {
break;
}
--$instance;
}
if ($pos !== false) {
            return self::replace($text, ++$pos, mb_strlen($fromText, 'UTF-8'), $toText);
}
return $text;
}
}
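/*
 * Illustrative usage of the helpers above (not part of the original file;
 * the literal values are examples only). Both methods mirror Excel's
 * REPLACE and SUBSTITUTE text functions:
 *
 *   Replace::replace('abcdef', 2, 3, 'XY');     // 'aXYef'
 *   Replace::substitute('a-b-c', '-', '+');     // 'a+b+c' (all occurrences)
 *   Replace::substitute('a-b-c', '-', '+', 2);  // 'a-b+c' (2nd occurrence only)
 */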
| gpl-2.0 |
7ShaYaN7/Telegram | TMessagesProj/src/main/java/org/telegram/messenger/ApplicationLoader.java | 14342 | /*
* This is the source code of Telegram for Android v. 3.x.x.
* It is licensed under GNU GPL v. 2 or later.
* You should have received a copy of the license in this archive (see LICENSE).
*
* Copyright Nikolai Kudashov, 2013-2016.
*/
package org.telegram.messenger;
import android.app.Activity;
import android.app.AlarmManager;
import android.app.Application;
import android.app.PendingIntent;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.SharedPreferences;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageInfo;
import android.content.res.Configuration;
import android.graphics.drawable.ColorDrawable;
import android.graphics.drawable.Drawable;
import android.os.Build;
import android.os.Handler;
import android.os.PowerManager;
import android.util.Base64;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesUtil;
import org.telegram.tgnet.ConnectionsManager;
import org.telegram.tgnet.SerializedData;
import org.telegram.tgnet.TLRPC;
import org.telegram.ui.Components.ForegroundDetector;
import java.io.File;
import java.io.RandomAccessFile;
public class ApplicationLoader extends Application {
private static Drawable cachedWallpaper;
private static int selectedColor;
private static boolean isCustomTheme;
private static final Object sync = new Object();
public static volatile Context applicationContext;
public static volatile Handler applicationHandler;
private static volatile boolean applicationInited = false;
public static volatile boolean isScreenOn = false;
public static volatile boolean mainInterfacePaused = true;
public static boolean isCustomTheme() {
return isCustomTheme;
}
public static int getSelectedColor() {
return selectedColor;
}
public static void reloadWallpaper() {
cachedWallpaper = null;
loadWallpaper();
}
public static void loadWallpaper() {
if (cachedWallpaper != null) {
return;
}
Utilities.searchQueue.postRunnable(new Runnable() {
@Override
public void run() {
synchronized (sync) {
int selectedColor = 0;
try {
SharedPreferences preferences = ApplicationLoader.applicationContext.getSharedPreferences("mainconfig", Activity.MODE_PRIVATE);
int selectedBackground = preferences.getInt("selectedBackground", 1000001);
selectedColor = preferences.getInt("selectedColor", 0);
if (selectedColor == 0) {
if (selectedBackground == 1000001) {
cachedWallpaper = applicationContext.getResources().getDrawable(R.drawable.background_hd);
isCustomTheme = false;
} else {
File toFile = new File(getFilesDirFixed(), "wallpaper.jpg");
if (toFile.exists()) {
cachedWallpaper = Drawable.createFromPath(toFile.getAbsolutePath());
isCustomTheme = true;
} else {
cachedWallpaper = applicationContext.getResources().getDrawable(R.drawable.background_hd);
isCustomTheme = false;
}
}
}
} catch (Throwable throwable) {
//ignore
}
if (cachedWallpaper == null) {
if (selectedColor == 0) {
selectedColor = -2693905;
}
cachedWallpaper = new ColorDrawable(selectedColor);
}
}
}
});
}
public static Drawable getCachedWallpaper() {
synchronized (sync) {
return cachedWallpaper;
}
}
private static void convertConfig() {
SharedPreferences preferences = ApplicationLoader.applicationContext.getSharedPreferences("dataconfig", Context.MODE_PRIVATE);
if (preferences.contains("currentDatacenterId")) {
SerializedData buffer = new SerializedData(32 * 1024);
buffer.writeInt32(2);
buffer.writeBool(preferences.getInt("datacenterSetId", 0) != 0);
buffer.writeBool(true);
buffer.writeInt32(preferences.getInt("currentDatacenterId", 0));
buffer.writeInt32(preferences.getInt("timeDifference", 0));
buffer.writeInt32(preferences.getInt("lastDcUpdateTime", 0));
buffer.writeInt64(preferences.getLong("pushSessionId", 0));
buffer.writeBool(false);
buffer.writeInt32(0);
try {
String datacentersString = preferences.getString("datacenters", null);
if (datacentersString != null) {
byte[] datacentersBytes = Base64.decode(datacentersString, Base64.DEFAULT);
if (datacentersBytes != null) {
SerializedData data = new SerializedData(datacentersBytes);
buffer.writeInt32(data.readInt32(false));
buffer.writeBytes(datacentersBytes, 4, datacentersBytes.length - 4);
data.cleanup();
}
}
} catch (Exception e) {
FileLog.e("tmessages", e);
}
try {
File file = new File(getFilesDirFixed(), "tgnet.dat");
RandomAccessFile fileOutputStream = new RandomAccessFile(file, "rws");
byte[] bytes = buffer.toByteArray();
fileOutputStream.writeInt(Integer.reverseBytes(bytes.length));
fileOutputStream.write(bytes);
fileOutputStream.close();
} catch (Exception e) {
FileLog.e("tmessages", e);
}
buffer.cleanup();
preferences.edit().clear().commit();
}
}
public static File getFilesDirFixed() {
for (int a = 0; a < 10; a++) {
File path = ApplicationLoader.applicationContext.getFilesDir();
if (path != null) {
return path;
}
}
try {
ApplicationInfo info = applicationContext.getApplicationInfo();
File path = new File(info.dataDir, "files");
path.mkdirs();
return path;
} catch (Exception e) {
FileLog.e("tmessages", e);
}
return new File("/data/data/org.telegram.messenger/files");
}
public static void postInitApplication() {
if (applicationInited) {
return;
}
applicationInited = true;
convertConfig();
try {
LocaleController.getInstance();
} catch (Exception e) {
e.printStackTrace();
}
try {
final IntentFilter filter = new IntentFilter(Intent.ACTION_SCREEN_ON);
filter.addAction(Intent.ACTION_SCREEN_OFF);
final BroadcastReceiver mReceiver = new ScreenReceiver();
applicationContext.registerReceiver(mReceiver, filter);
} catch (Exception e) {
e.printStackTrace();
}
try {
PowerManager pm = (PowerManager)ApplicationLoader.applicationContext.getSystemService(Context.POWER_SERVICE);
isScreenOn = pm.isScreenOn();
FileLog.e("tmessages", "screen state = " + isScreenOn);
} catch (Exception e) {
FileLog.e("tmessages", e);
}
UserConfig.loadConfig();
String deviceModel;
String langCode;
String appVersion;
String systemVersion;
String configPath = getFilesDirFixed().toString();
try {
langCode = LocaleController.getLocaleString(LocaleController.getInstance().getSystemDefaultLocale());
deviceModel = Build.MANUFACTURER + Build.MODEL;
PackageInfo pInfo = ApplicationLoader.applicationContext.getPackageManager().getPackageInfo(ApplicationLoader.applicationContext.getPackageName(), 0);
appVersion = pInfo.versionName + " (" + pInfo.versionCode + ")";
systemVersion = "SDK " + Build.VERSION.SDK_INT;
} catch (Exception e) {
langCode = "en";
deviceModel = "Android unknown";
appVersion = "App version unknown";
systemVersion = "SDK " + Build.VERSION.SDK_INT;
}
if (langCode.trim().length() == 0) {
langCode = "en";
}
if (deviceModel.trim().length() == 0) {
deviceModel = "Android unknown";
}
if (appVersion.trim().length() == 0) {
appVersion = "App version unknown";
}
if (systemVersion.trim().length() == 0) {
systemVersion = "SDK Unknown";
}
MessagesController.getInstance();
ConnectionsManager.getInstance().init(BuildVars.BUILD_VERSION, TLRPC.LAYER, BuildVars.APP_ID, deviceModel, systemVersion, appVersion, langCode, configPath, FileLog.getNetworkLogPath(), UserConfig.getClientUserId());
if (UserConfig.getCurrentUser() != null) {
MessagesController.getInstance().putUser(UserConfig.getCurrentUser(), true);
ConnectionsManager.getInstance().applyCountryPortNumber(UserConfig.getCurrentUser().phone);
MessagesController.getInstance().getBlockedUsers(true);
SendMessagesHelper.getInstance().checkUnsentMessages();
}
ApplicationLoader app = (ApplicationLoader)ApplicationLoader.applicationContext;
app.initPlayServices();
        FileLog.e("tmessages", "app initialized");
ContactsController.getInstance().checkAppAccount();
MediaController.getInstance();
}
@Override
public void onCreate() {
super.onCreate();
if (Build.VERSION.SDK_INT < 11) {
java.lang.System.setProperty("java.net.preferIPv4Stack", "true");
java.lang.System.setProperty("java.net.preferIPv6Addresses", "false");
}
applicationContext = getApplicationContext();
NativeLoader.initNativeLibs(ApplicationLoader.applicationContext);
ConnectionsManager.native_setJava(Build.VERSION.SDK_INT == 14 || Build.VERSION.SDK_INT == 15);
if (Build.VERSION.SDK_INT >= 14) {
new ForegroundDetector(this);
}
applicationHandler = new Handler(applicationContext.getMainLooper());
startPushService();
}
public static void startPushService() {
SharedPreferences preferences = applicationContext.getSharedPreferences("Notifications", MODE_PRIVATE);
if (preferences.getBoolean("pushService", true)) {
applicationContext.startService(new Intent(applicationContext, NotificationsService.class));
if (android.os.Build.VERSION.SDK_INT >= 19) {
// Calendar cal = Calendar.getInstance();
// PendingIntent pintent = PendingIntent.getService(applicationContext, 0, new Intent(applicationContext, NotificationsService.class), 0);
// AlarmManager alarm = (AlarmManager) applicationContext.getSystemService(Context.ALARM_SERVICE);
// alarm.setRepeating(AlarmManager.RTC_WAKEUP, cal.getTimeInMillis(), 30000, pintent);
PendingIntent pintent = PendingIntent.getService(applicationContext, 0, new Intent(applicationContext, NotificationsService.class), 0);
AlarmManager alarm = (AlarmManager)applicationContext.getSystemService(Context.ALARM_SERVICE);
alarm.cancel(pintent);
}
} else {
stopPushService();
}
}
public static void stopPushService() {
applicationContext.stopService(new Intent(applicationContext, NotificationsService.class));
PendingIntent pintent = PendingIntent.getService(applicationContext, 0, new Intent(applicationContext, NotificationsService.class), 0);
AlarmManager alarm = (AlarmManager)applicationContext.getSystemService(Context.ALARM_SERVICE);
alarm.cancel(pintent);
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
try {
LocaleController.getInstance().onDeviceConfigurationChange(newConfig);
AndroidUtilities.checkDisplaySize();
} catch (Exception e) {
e.printStackTrace();
}
}
private void initPlayServices() {
AndroidUtilities.runOnUIThread(new Runnable() {
@Override
public void run() {
if (checkPlayServices()) {
if (UserConfig.pushString == null || UserConfig.pushString.length() == 0) {
FileLog.d("tmessages", "GCM Registration not found.");
Intent intent = new Intent(applicationContext, GcmRegistrationIntentService.class);
startService(intent);
} else {
FileLog.d("tmessages", "GCM regId = " + UserConfig.pushString);
}
} else {
FileLog.d("tmessages", "No valid Google Play Services APK found.");
}
}
}, 1000);
}
private boolean checkPlayServices() {
int resultCode = GooglePlayServicesUtil.isGooglePlayServicesAvailable(this);
return resultCode == ConnectionResult.SUCCESS;
/*if (resultCode != ConnectionResult.SUCCESS) {
if (GooglePlayServicesUtil.isUserRecoverableError(resultCode)) {
GooglePlayServicesUtil.getErrorDialog(resultCode, this, PLAY_SERVICES_RESOLUTION_REQUEST).show();
} else {
Log.i("tmessages", "This device is not supported.");
}
return false;
}
return true;*/
}
}
| gpl-2.0 |
LorenK96/slide-desktop | src/main/java/enums/ConnectionMode.java | 65 | package enums;
public enum ConnectionMode {
WIFI,
USB
}
| gpl-2.0 |
msekletar/mgetty | voice/libvoice/Multitech_5634ZPX.c | 10960 | /*
* Multitech_5634ZPX.c
*
 * Hacked by <[email protected]>. It may eventually be merged
 * with the other Multitech driver.
*
* $Id: Multitech_5634ZPX.c,v 1.4 2006/09/26 17:17:56 gert Exp $
*
 * Some functions can't be static because they are inherited by Multitech_5634ZPX_ISA.
* Copied by md 2000/12/14
*/
#include "../include/voice.h"
static char mode_save[16] = "";
int Multitech_5634ZPX_answer_phone(void)
{
int result;
reset_watchdog();
if (((result = voice_command("AT+VLS=1", "OK|CONNECT")) & VMA_USER) !=
VMA_USER)
return(VMA_ERROR);
if (result == VMA_USER_2)
return(VMA_CONNECT);
return(VMA_OK);
}
int Multitech_5634ZPX_init(void)
{
char buffer[VOICE_BUF_LEN];
reset_watchdog();
voice_modem_state = INITIALIZING;
lprintf(L_MESG, "initializing %s voice modem", voice_modem->name);
/*
* AT+VIT=10 - Set inactivity timer to 10 seconds
*/
if (voice_command("AT+VIT=10", "OK") != VMA_USER_1)
lprintf(L_WARN, "voice init failed, continuing");
#if 0
/*
* AT+VDD=x,y - Set DTMF tone detection threshold and duration detection
*/
sprintf(buffer, "AT+VDD=%d,%d", cvd.dtmf_threshold.d.i *
31 / 100, cvd.dtmf_len.d.i / 5);
if (voice_command(buffer, "OK") != VMA_USER_1)
lprintf(L_WARN, "setting DTMF preferences didn't work");
#endif
/*
* AT+VSD=x,y - Set silence threshold and duration. 0-256, .1sec
*/
sprintf(buffer, "AT+VSD=%d,%d", /*(int)cvd.rec_silence_threshold.d.i *
1 / 100 +*/ 128, (int)cvd.rec_silence_len.d.i);
if (voice_command(buffer, "OK") != VMA_USER_1)
lprintf(L_WARN, "setting recording preferences didn't work");
/*
* AT+VGT - Set the transmit gain for voice samples. (128 is 1.0)
*/
if (cvd.transmit_gain.d.i == -1)
cvd.transmit_gain.d.i = 50;
sprintf(buffer, "AT+VGT=%d", (int)cvd.transmit_gain.d.i * 144 / 100 + 56);
if (voice_command(buffer, "OK") != VMA_USER_1)
lprintf(L_WARN, "setting transmit gain didn't work");
/*
* AT+VGR - Set receive gain for voice samples. (128 is 1.0)
*/
if (cvd.receive_gain.d.i == -1)
cvd.receive_gain.d.i = 50;
sprintf(buffer, "AT+VGR=%d", (int)cvd.receive_gain.d.i * 144 / 100 + 56);
if (voice_command(buffer, "OK") != VMA_USER_1)
lprintf(L_WARN, "setting receive gain didn't work");
voice_modem_state = IDLE;
return(OK);
}
int Multitech_5634ZPX_set_compression(p_int *compression, p_int *speed,
int *bits)
{
char buffer[VOICE_BUF_LEN];
reset_watchdog();
if (*compression == 0)
*compression = 132;
if (*speed == 0)
*speed = 8000;
if (*speed != 8000)
{
lprintf(L_WARN, "%s: Illegal sample rate (%d)", voice_modem_name,
(int)*speed);
return(FAIL);
}
/*
VSM=cml,vsr,scs,sel
cml: 128-256 (compression method)
128,"8-BIT LINEAR",(7200,8000,11025)
129,"16-BIT LINEAR",(7200,8000,11025)
130,"8-BIT ALAW",(8000)
131,"8-BIT ULAW",(8000)
132,"IMA ADPCM",(7200,8000,11025)
vsr: (voice sample rate)
scs: 0 (disabled), 1-n (how much noise is silence)
sel: 0 (disabled), 1-n (.1 sec incr: silence expansion)
*/
switch (*compression)
{
case 4:
*bits = 4;
sprintf(buffer, "AT+VSM=2,%d", (int)*speed);
if (voice_command(buffer, "OK") != VMA_USER_1)
return(FAIL);
break;
case 132:
*bits = 4;
sprintf(buffer, "AT+VSM=132,%d", (int)*speed);
if (voice_command(buffer, "OK") != VMA_USER_1)
return(FAIL);
break;
default:
lprintf(L_WARN, "%s: Illegal voice compression method (%d)",
voice_modem_name, (int)*compression);
return(FAIL);
}
return(OK);
}
static int Multitech_5634ZPX_set_device(int device)
{
reset_watchdog();
/*
0,"",B0000000,B0000000,B0000000
1,"T",0BC01800,0BC01800,0BC01800
2,"L",00000000,00000000,B0000000
3,"LT",0BC01800,0BC01800,0BC01800
4,"S",00000000,00000000,B0000000
5,"ST",0BC01800,0BC01800,0BC01800
6,"M",00000000,00000000,B0000000
7,"MST",0BC01800,0BC01800,0BC01800
where:
0 "" on-hook, local phone->telco
1 T off-hook, modem->telco
2 L off-hook, local phone->telco
3 LT off-hook, local phone&modem->telco
4 S on-hook, spkr->modem, local phone->telco
5 ST off-hook, spkr->telco, modem->telco
6 M on-hook, mike->modem, local phone->telco
7 MST off-hook, mike&spkr->telco, modem->telco
*/
switch (device)
{
case NO_DEVICE:
voice_command("AT+VLS=0", "OK");
return(OK);
case LOCAL_HANDSET:
voice_command("AT+VLS=2", "OK");
return(OK);
case DIALUP_LINE:
voice_command("AT+VLS=0", "OK"); /* MD: Changed from 1 to 0 -> now vm dials out */
return(OK);
case EXTERNAL_MICROPHONE:
voice_command("AT+VLS=11", "OK");
return(OK);
case INTERNAL_SPEAKER:
voice_command("AT+VLS=4", "OK");
return(OK);
};
lprintf(L_WARN, "%s: Unknown output device (%d)", voice_modem_name,
device);
return(FAIL);
}
void Multitech_5634ZPX_fix_modem(int expect_error)
{
char buffer[VOICE_BUF_LEN];
int result = VMA_FAIL;
if (!cvd.enable_command_echo.d.i) {
/* Multitech 2834 ZDXV modem (ROM 0416A NORTH AMERICAN)
* -- [email protected]
* As I understand the problem -- some Multitech 2834 ZDXv
* modems garble the command echo at certain points. To counter
* this, a dummy command is sent at those points (just AT)
* and the echo is ignored: this is the purpose of
* Multitech_2834_fix_modem(). If echo is off, there is
 * no need to prevent garbled command echoes, and thus no need
* for Multitech_2834_fix_modem() (in fact, it would fail).
*/
return;
}
/* my ZDXv with 0416A firmware seems to exhibit a bug here -
 * if you send the modem 'AT', it echoes 'TA'. If you send it
 * 'ATI', it echoes 'TIA'!
*/
voice_write("AT");
do {
if (voice_read(buffer) != OK) {
voice_flush(1);
break;
}
result = voice_analyze(buffer, "AT", TRUE);
if (result == VMA_FAIL) {
voice_flush(1);
break;
}
if (result == VMA_ERROR) {
lprintf(L_WARN, "%s: Modem returned ERROR",
program_name);
voice_flush(1);
break;
}
} while (result != VMA_USER_1);
if (result == VMA_USER_1 && expect_error)
lprintf(L_WARN, "%s: Modem answered correctly", program_name);
if (result != VMA_USER_1 && !expect_error)
lprintf(L_WARN, "%s: Modem answered incorrectly", program_name);
}
int Multitech_5634ZPX_switch_to_data_fax(char *mode)
{
char buffer[VOICE_BUF_LEN];
sprintf(buffer, "%s%s", voice_modem->switch_mode_cmnd, mode);
if ((voice_command(buffer, voice_modem->switch_mode_answr) & VMA_USER) !=
VMA_USER)
return FAIL;
Multitech_5634ZPX_fix_modem(1);
return OK;
}
int Multitech_5634ZPX_voice_mode_off(void)
{
char buffer[VOICE_BUF_LEN];
sprintf(buffer, "%s%s", voice_modem->switch_mode_cmnd, mode_save);
if ((voice_command(buffer, voice_modem->switch_mode_answr) & VMA_USER) !=
VMA_USER)
return FAIL;
Multitech_5634ZPX_fix_modem(1);
return OK;
}
int Multitech_5634ZPX_voice_mode_on(void)
{
char buffer[VOICE_BUF_LEN];
if (voice_command(voice_modem->ask_mode_cmnd, "") != OK)
return FAIL;
do {
if (voice_read(mode_save) != OK)
return FAIL;
} while (strlen(mode_save) == 0);
if (strncmp(mode_save, "+FCLASS=", 8) == 0)
memmove(mode_save, mode_save + 8, strlen(mode_save) - 8 + 1);
if ((voice_command("", voice_modem->ask_mode_answr) & VMA_USER) != VMA_USER)
return FAIL;
sprintf(buffer, "%s%s", voice_modem->switch_mode_cmnd,
voice_modem->voice_mode_id);
if ((voice_command(buffer, voice_modem->switch_mode_answr) & VMA_USER) !=
VMA_USER)
return FAIL;
Multitech_5634ZPX_fix_modem(0);
return OK;
}
#define Multitech_beep_timeunit 100
voice_modem_struct Multitech_5634ZPX =
{
"Multitech 5634ZPX",
"Multitech5634",
(char *) IS_101_pick_phone_cmnd,
(char *) IS_101_pick_phone_answr,
(char *) IS_101_beep_cmnd,
(char *) IS_101_beep_answr,
Multitech_beep_timeunit,
(char *) IS_101_hardflow_cmnd,
(char *) IS_101_hardflow_answr,
(char *) IS_101_softflow_cmnd,
(char *) IS_101_softflow_answr,
(char *) IS_101_start_play_cmnd,
(char *) IS_101_start_play_answer,
(char *) IS_101_reset_play_cmnd,
(char *) IS_101_intr_play_cmnd,
(char *) IS_101_intr_play_answr,
(char *) IS_101_stop_play_cmnd,
(char *) IS_101_stop_play_answr,
(char *) IS_101_start_rec_cmnd,
(char *) IS_101_start_rec_answr,
(char *) IS_101_stop_rec_cmnd,
(char *) IS_101_stop_rec_answr,
(char *) IS_101_switch_mode_cmnd,
(char *) IS_101_switch_mode_answr,
(char *) IS_101_ask_mode_cmnd,
(char *) IS_101_ask_mode_answr,
(char *) IS_101_voice_mode_id,
(char *) IS_101_play_dtmf_cmd,
(char *) IS_101_play_dtmf_extra,
(char *) IS_101_play_dtmf_answr,
// [email protected] : voice-duplex-patch start
NULL, /* (char *) V253modem_start_duplex_voice_cmnd, */
NULL, /* (char *) V253modemstart_duplex_voice_answr, */
NULL, /* (char *) V253modem_stop_duplex_voice_cmnd , */
NULL, /* (char *) V253modem_stop_duplex_voice_answr, */
// [email protected] : voice-duplex-patch end
&Multitech_5634ZPX_answer_phone,
&IS_101_beep,
&IS_101_dial,
&IS_101_handle_dle,
&Multitech_5634ZPX_init,
&IS_101_message_light_off,
&IS_101_message_light_on,
&IS_101_start_play_file,
&IS_101_reset_play_file,
&IS_101_stop_play_file,
&IS_101_play_file,
&IS_101_record_file,
&Multitech_5634ZPX_set_compression,
&Multitech_5634ZPX_set_device,
&IS_101_stop_dialing,
&IS_101_stop_playing,
&IS_101_stop_recording,
&IS_101_stop_waiting,
&Multitech_5634ZPX_switch_to_data_fax,
&Multitech_5634ZPX_voice_mode_off,
&Multitech_5634ZPX_voice_mode_on,
&IS_101_wait,
&IS_101_play_dtmf,
&IS_101_check_rmd_adequation,
// [email protected] : voice-duplex-patch start
&IS_101_handle_duplex_voice,
NULL, /* since there is no way to enter duplex voice state */
// [email protected] : voice-duplex-patch end
0
};
| gpl-2.0 |
govindx7c6/libvirt | src/rpc/virnetserverservice.c | 14926 | /*
* virnetserverservice.c: generic network RPC server service
*
* Copyright (C) 2006-2012, 2014 Red Hat, Inc.
* Copyright (C) 2006 Daniel P. Berrange
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
* Author: Daniel P. Berrange <[email protected]>
*/
#include <config.h>
#include "virnetserverservice.h"
#include <unistd.h>
#include "viralloc.h"
#include "virerror.h"
#include "virthread.h"
#define VIR_FROM_THIS VIR_FROM_RPC
struct _virNetServerService {
virObject object;
size_t nsocks;
virNetSocketPtr *socks;
int auth;
bool readonly;
size_t nrequests_client_max;
#if WITH_GNUTLS
virNetTLSContextPtr tls;
#endif
virNetServerServiceDispatchFunc dispatchFunc;
void *dispatchOpaque;
};
static virClassPtr virNetServerServiceClass;
static void virNetServerServiceDispose(void *obj);
static int virNetServerServiceOnceInit(void)
{
if (!(virNetServerServiceClass = virClassNew(virClassForObject(),
"virNetServerService",
sizeof(virNetServerService),
virNetServerServiceDispose)))
return -1;
return 0;
}
VIR_ONCE_GLOBAL_INIT(virNetServerService)
static void virNetServerServiceAccept(virNetSocketPtr sock,
int events ATTRIBUTE_UNUSED,
void *opaque)
{
virNetServerServicePtr svc = opaque;
virNetSocketPtr clientsock = NULL;
if (virNetSocketAccept(sock, &clientsock) < 0)
goto cleanup;
if (!clientsock) /* Connection already went away */
goto cleanup;
if (!svc->dispatchFunc)
goto cleanup;
svc->dispatchFunc(svc, clientsock, svc->dispatchOpaque);
cleanup:
virObjectUnref(clientsock);
}
virNetServerServicePtr
virNetServerServiceNewFDOrUNIX(const char *path,
mode_t mask,
gid_t grp,
int auth,
#if WITH_GNUTLS
virNetTLSContextPtr tls,
#endif
bool readonly,
size_t max_queued_clients,
size_t nrequests_client_max,
unsigned int nfds,
unsigned int *cur_fd)
{
if (*cur_fd - STDERR_FILENO > nfds) {
/*
* There are no more file descriptors to use, so we have to
* fallback to UNIX socket.
*/
return virNetServerServiceNewUNIX(path,
mask,
grp,
auth,
#if WITH_GNUTLS
tls,
#endif
readonly,
max_queued_clients,
nrequests_client_max);
} else {
/*
* There's still enough file descriptors. In this case we'll
* use the current one and increment it afterwards. Take care
* with order of operation for pointer arithmetic and auto
* increment on cur_fd - the parentheses are necessary.
*/
return virNetServerServiceNewFD((*cur_fd)++,
auth,
#if WITH_GNUTLS
tls,
#endif
readonly,
max_queued_clients,
nrequests_client_max);
}
}
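/*
 * Illustrative behaviour of the helper above (hypothetical numbers). If the
 * caller starts with
 *	unsigned int cur_fd = STDERR_FILENO + 1;
 * and nfds == 2 inherited sockets, the first two calls consume fds 3 and 4
 * via virNetServerServiceNewFD(); the third call no longer finds a spare
 * inherited fd and falls back to virNetServerServiceNewUNIX() on the
 * supplied path.
 */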
virNetServerServicePtr virNetServerServiceNewTCP(const char *nodename,
const char *service,
int auth,
#if WITH_GNUTLS
virNetTLSContextPtr tls,
#endif
bool readonly,
size_t max_queued_clients,
size_t nrequests_client_max)
{
virNetServerServicePtr svc;
size_t i;
if (virNetServerServiceInitialize() < 0)
return NULL;
if (!(svc = virObjectNew(virNetServerServiceClass)))
return NULL;
svc->auth = auth;
svc->readonly = readonly;
svc->nrequests_client_max = nrequests_client_max;
#if WITH_GNUTLS
svc->tls = virObjectRef(tls);
#endif
if (virNetSocketNewListenTCP(nodename,
service,
&svc->socks,
&svc->nsocks) < 0)
goto error;
for (i = 0; i < svc->nsocks; i++) {
if (virNetSocketListen(svc->socks[i], max_queued_clients) < 0)
goto error;
/* IO callback is initially disabled, until we're ready
* to deal with incoming clients */
virObjectRef(svc);
if (virNetSocketAddIOCallback(svc->socks[i],
0,
virNetServerServiceAccept,
svc,
virObjectFreeCallback) < 0) {
virObjectUnref(svc);
goto error;
}
}
return svc;
error:
virObjectUnref(svc);
return NULL;
}
virNetServerServicePtr virNetServerServiceNewUNIX(const char *path,
mode_t mask,
gid_t grp,
int auth,
#if WITH_GNUTLS
virNetTLSContextPtr tls,
#endif
bool readonly,
size_t max_queued_clients,
size_t nrequests_client_max)
{
virNetServerServicePtr svc;
size_t i;
if (virNetServerServiceInitialize() < 0)
return NULL;
if (!(svc = virObjectNew(virNetServerServiceClass)))
return NULL;
svc->auth = auth;
svc->readonly = readonly;
svc->nrequests_client_max = nrequests_client_max;
#if WITH_GNUTLS
svc->tls = virObjectRef(tls);
#endif
svc->nsocks = 1;
if (VIR_ALLOC_N(svc->socks, svc->nsocks) < 0)
goto error;
if (virNetSocketNewListenUNIX(path,
mask,
-1,
grp,
&svc->socks[0]) < 0)
goto error;
for (i = 0; i < svc->nsocks; i++) {
if (virNetSocketListen(svc->socks[i], max_queued_clients) < 0)
goto error;
/* IO callback is initially disabled, until we're ready
* to deal with incoming clients */
virObjectRef(svc);
if (virNetSocketAddIOCallback(svc->socks[i],
0,
virNetServerServiceAccept,
svc,
virObjectFreeCallback) < 0) {
virObjectUnref(svc);
goto error;
}
}
return svc;
error:
virObjectUnref(svc);
return NULL;
}
virNetServerServicePtr virNetServerServiceNewFD(int fd,
int auth,
#if WITH_GNUTLS
virNetTLSContextPtr tls,
#endif
bool readonly,
size_t max_queued_clients,
size_t nrequests_client_max)
{
virNetServerServicePtr svc;
size_t i;
if (virNetServerServiceInitialize() < 0)
return NULL;
if (!(svc = virObjectNew(virNetServerServiceClass)))
return NULL;
svc->auth = auth;
svc->readonly = readonly;
svc->nrequests_client_max = nrequests_client_max;
#if WITH_GNUTLS
svc->tls = virObjectRef(tls);
#endif
svc->nsocks = 1;
if (VIR_ALLOC_N(svc->socks, svc->nsocks) < 0)
goto error;
if (virNetSocketNewListenFD(fd,
&svc->socks[0]) < 0)
goto error;
for (i = 0; i < svc->nsocks; i++) {
if (virNetSocketListen(svc->socks[i], max_queued_clients) < 0)
goto error;
/* IO callback is initially disabled, until we're ready
* to deal with incoming clients */
if (virNetSocketAddIOCallback(svc->socks[i],
0,
virNetServerServiceAccept,
svc,
virObjectFreeCallback) < 0)
goto error;
}
return svc;
error:
virObjectUnref(svc);
return NULL;
}
virNetServerServicePtr virNetServerServiceNewPostExecRestart(virJSONValuePtr object)
{
virNetServerServicePtr svc;
virJSONValuePtr socks;
size_t i;
int n;
unsigned int max;
if (virNetServerServiceInitialize() < 0)
return NULL;
if (!(svc = virObjectNew(virNetServerServiceClass)))
return NULL;
if (virJSONValueObjectGetNumberInt(object, "auth", &svc->auth) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing auth field in JSON state document"));
goto error;
}
if (virJSONValueObjectGetBoolean(object, "readonly", &svc->readonly) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing readonly field in JSON state document"));
goto error;
}
if (virJSONValueObjectGetNumberUint(object, "nrequests_client_max",
&max) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing nrequests_client_max field in JSON state document"));
goto error;
}
svc->nrequests_client_max = max;
if (!(socks = virJSONValueObjectGet(object, "socks"))) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing socks field in JSON state document"));
goto error;
}
if ((n = virJSONValueArraySize(socks)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("socks field in JSON was not an array"));
goto error;
}
svc->nsocks = n;
if (VIR_ALLOC_N(svc->socks, svc->nsocks) < 0)
goto error;
for (i = 0; i < svc->nsocks; i++) {
virJSONValuePtr child = virJSONValueArrayGet(socks, i);
virNetSocketPtr sock;
if (!(sock = virNetSocketNewPostExecRestart(child))) {
virObjectUnref(sock);
goto error;
}
svc->socks[i] = sock;
/* IO callback is initially disabled, until we're ready
* to deal with incoming clients */
virObjectRef(svc);
if (virNetSocketAddIOCallback(sock,
0,
virNetServerServiceAccept,
svc,
virObjectFreeCallback) < 0) {
virObjectUnref(svc);
virObjectUnref(sock);
goto error;
}
}
return svc;
error:
virObjectUnref(svc);
return NULL;
}
virJSONValuePtr virNetServerServicePreExecRestart(virNetServerServicePtr svc)
{
virJSONValuePtr object = virJSONValueNewObject();
virJSONValuePtr socks;
size_t i;
if (!object)
return NULL;
if (virJSONValueObjectAppendNumberInt(object, "auth", svc->auth) < 0)
goto error;
if (virJSONValueObjectAppendBoolean(object, "readonly", svc->readonly) < 0)
goto error;
if (virJSONValueObjectAppendNumberUint(object, "nrequests_client_max", svc->nrequests_client_max) < 0)
goto error;
if (!(socks = virJSONValueNewArray()))
goto error;
if (virJSONValueObjectAppend(object, "socks", socks) < 0) {
virJSONValueFree(socks);
goto error;
}
for (i = 0; i < svc->nsocks; i++) {
virJSONValuePtr child;
if (!(child = virNetSocketPreExecRestart(svc->socks[i])))
goto error;
if (virJSONValueArrayAppend(socks, child) < 0) {
virJSONValueFree(child);
goto error;
}
}
return object;
error:
virJSONValueFree(object);
return NULL;
}
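/*
 * Illustrative shape of the JSON state document consumed/produced by the
 * PostExecRestart/PreExecRestart helpers above (values are examples; the
 * per-socket objects come from virNetSocketPreExecRestart()):
 *
 *   {
 *     "auth": 0,
 *     "readonly": false,
 *     "nrequests_client_max": 5,
 *     "socks": [ { ... per-socket state ... } ]
 *   }
 */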
int virNetServerServiceGetPort(virNetServerServicePtr svc)
{
/* We're assuming if there are multiple sockets
* for IPv4 & 6, then they are all on same port */
return virNetSocketGetPort(svc->socks[0]);
}
int virNetServerServiceGetAuth(virNetServerServicePtr svc)
{
return svc->auth;
}
bool virNetServerServiceIsReadonly(virNetServerServicePtr svc)
{
return svc->readonly;
}
size_t virNetServerServiceGetMaxRequests(virNetServerServicePtr svc)
{
return svc->nrequests_client_max;
}
#if WITH_GNUTLS
virNetTLSContextPtr virNetServerServiceGetTLSContext(virNetServerServicePtr svc)
{
return svc->tls;
}
#endif
void virNetServerServiceSetDispatcher(virNetServerServicePtr svc,
virNetServerServiceDispatchFunc func,
void *opaque)
{
svc->dispatchFunc = func;
svc->dispatchOpaque = opaque;
}
void virNetServerServiceDispose(void *obj)
{
virNetServerServicePtr svc = obj;
size_t i;
for (i = 0; i < svc->nsocks; i++)
virObjectUnref(svc->socks[i]);
VIR_FREE(svc->socks);
#if WITH_GNUTLS
virObjectUnref(svc->tls);
#endif
}
void virNetServerServiceToggle(virNetServerServicePtr svc,
bool enabled)
{
size_t i;
for (i = 0; i < svc->nsocks; i++)
virNetSocketUpdateIOCallback(svc->socks[i],
enabled ?
VIR_EVENT_HANDLE_READABLE :
0);
}
void virNetServerServiceClose(virNetServerServicePtr svc)
{
size_t i;
if (!svc)
return;
for (i = 0; i < svc->nsocks; i++) {
virNetSocketClose(svc->socks[i]);
}
}
| gpl-2.0 |
eduardmokrov/study | wp-content/themes/coursepress/content.php | 2347 | <?php
/**
* @package CoursePress
*/
?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>>
<header class="entry-header">
<h1 class="entry-title"><a href="<?php the_permalink(); ?>" rel="bookmark"><?php the_title(); ?></a></h1>
<?php
if ( has_post_thumbnail() ) {
echo '<div class="featured-image">';
the_post_thumbnail();
echo '</div>';
}
?>
<?php if ( 'post' == get_post_type() ) : ?>
<div class="entry-meta">
<?php coursepress_posted_on(); ?>
</div><!-- .entry-meta -->
<?php endif; ?>
</header><!-- .entry-header -->
<?php if ( is_search() ) : // Only display Excerpts for Search ?>
<div class="entry-summary">
<?php the_excerpt(); ?>
</div><!-- .entry-summary -->
<?php else : ?>
<div class="entry-content">
<?php the_content( __( 'Continue reading <span class="meta-nav">→</span>', 'cp' ) ); ?>
<?php
wp_link_pages( array(
'before' => '<div class="page-links">' . __( 'Pages:', 'cp' ),
'after' => '</div>',
) );
?>
</div><!-- .entry-content -->
<?php endif; ?>
<footer class="entry-meta">
<?php if ( 'post' == get_post_type() ) : // Hide category and tag text for pages on Search ?>
<?php
/* translators: used between list items, there is a space after the comma */
$categories_list = get_the_category_list( __( ', ', 'cp' ) );
if ( $categories_list && coursepress_categorized_blog() ) :
?>
<span class="cat-links">
<?php printf( __( 'Posted in %1$s', 'cp' ), $categories_list ); ?>
</span>
<?php endif; // End if categories ?>
<?php
/* translators: used between list items, there is a space after the comma */
$tags_list = get_the_tag_list( '', __( ', ', 'cp' ) );
if ( $tags_list ) :
?>
<span class="tags-links">
<?php printf( __( 'Tagged %1$s', 'cp' ), $tags_list ); ?>
</span>
<?php endif; // End if $tags_list ?>
<?php endif; // End if 'post' == get_post_type() ?>
<?php if ( ! post_password_required() && ( comments_open() || '0' != get_comments_number() ) ) : ?>
<span class="comments-link"><?php comments_popup_link( __( 'Leave a comment', 'cp' ), __( '1 Comment', 'cp' ), __( '% Comments', 'cp' ) ); ?></span>
<?php endif; ?>
<?php edit_post_link( __( 'Edit', 'cp' ), '<span class="edit-link">', '</span>' ); ?>
</footer><!-- .entry-meta -->
</article><!-- #post-## -->
| gpl-2.0 |
pghmcfc/proftpd | tests/t/lib/ProFTPD/Tests/Config/Limit/HELP.pm | 5946 | package ProFTPD::Tests::Config::Limit::HELP;
use lib qw(t/lib);
use base qw(ProFTPD::TestSuite::Child);
use strict;
use File::Path qw(mkpath);
use File::Spec;
use IO::Handle;
use ProFTPD::TestSuite::FTP;
use ProFTPD::TestSuite::Utils qw(:auth :config :running :test :testsuite);
$| = 1;
my $order = 0;
my $TESTS = {
limit_help_issue1296 => {
order => ++$order,
test_class => [qw(forking)],
},
limit_site_help_issue1296 => {
order => ++$order,
test_class => [qw(forking)],
},
};
sub new {
return shift()->SUPER::new(@_);
}
sub list_tests {
return testsuite_get_runnable_tests($TESTS);
}
sub limit_help_issue1296 {
my $self = shift;
my $tmpdir = $self->{tmpdir};
my $setup = test_setup($tmpdir, 'limit');
my $config = {
PidFile => $setup->{pid_file},
ScoreboardFile => $setup->{scoreboard_file},
SystemLog => $setup->{log_file},
TraceLog => $setup->{log_file},
Trace => 'command:10 directory:10',
AuthUserFile => $setup->{auth_user_file},
AuthGroupFile => $setup->{auth_group_file},
DefaultChdir => '~',
IfModules => {
'mod_delay.c' => {
DelayEngine => 'off',
},
},
Limit => {
HELP => {
DenyAll => '',
},
}
};
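  # The Limit hash above is rendered by config_write() into a proftpd.conf
  # section roughly equivalent to the following (illustrative; the exact
  # formatting comes from ProFTPD::TestSuite::Utils):
  #
  #   <Limit HELP>
  #     DenyAll
  #   </Limit>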
my ($port, $config_user, $config_group) = config_write($setup->{config_file},
$config);
# Open pipes, for use between the parent and child processes. Specifically,
# the child will indicate when it's done with its test by writing a message
# to the parent.
my ($rfh, $wfh);
unless (pipe($rfh, $wfh)) {
die("Can't open pipe: $!");
}
my $ex;
# Fork child
$self->handle_sigchld();
defined(my $pid = fork()) or die("Can't fork: $!");
if ($pid) {
eval {
my $client = ProFTPD::TestSuite::FTP->new('127.0.0.1', $port);
eval { $client->help() };
unless ($@) {
die("HELP command succeeded unexpectedly");
}
my $resp_code = $client->response_code();
my $resp_msg = $client->response_msg();
my $expected = 501;
$self->assert($expected == $resp_code,
test_msg("Expected response code $expected, got $resp_code"));
$expected = 'HELP: Permission denied';
$self->assert($expected eq $resp_msg,
test_msg("Expected response message '$expected', got '$resp_msg'"));
$client->login($setup->{user}, $setup->{passwd});
eval { $client->help() };
unless ($@) {
die("HELP command succeeded unexpectedly");
}
$resp_code = $client->response_code();
$resp_msg = $client->response_msg();
$expected = 501;
$self->assert($expected == $resp_code,
test_msg("Expected response code $expected, got $resp_code"));
$expected = 'HELP: Permission denied';
$self->assert($expected eq $resp_msg,
test_msg("Expected response message '$expected', got '$resp_msg'"));
$client->quit();
};
if ($@) {
$ex = $@;
}
$wfh->print("done\n");
$wfh->flush();
} else {
eval { server_wait($setup->{config_file}, $rfh) };
if ($@) {
warn($@);
exit 1;
}
exit 0;
}
# Stop server
server_stop($setup->{pid_file});
$self->assert_child_ok($pid);
test_cleanup($setup->{log_file}, $ex);
}
sub limit_site_help_issue1296 {
my $self = shift;
my $tmpdir = $self->{tmpdir};
my $setup = test_setup($tmpdir, 'limit');
my $config = {
PidFile => $setup->{pid_file},
ScoreboardFile => $setup->{scoreboard_file},
SystemLog => $setup->{log_file},
TraceLog => $setup->{log_file},
Trace => 'command:10 directory:10',
AuthUserFile => $setup->{auth_user_file},
AuthGroupFile => $setup->{auth_group_file},
DefaultChdir => '~',
IfModules => {
'mod_delay.c' => {
DelayEngine => 'off',
},
},
Limit => {
SITE_HELP => {
DenyAll => '',
},
}
};
my ($port, $config_user, $config_group) = config_write($setup->{config_file},
$config);
# Open pipes, for use between the parent and child processes. Specifically,
# the child will indicate when it's done with its test by writing a message
# to the parent.
my ($rfh, $wfh);
unless (pipe($rfh, $wfh)) {
die("Can't open pipe: $!");
}
my $ex;
# Fork child
$self->handle_sigchld();
defined(my $pid = fork()) or die("Can't fork: $!");
if ($pid) {
eval {
my $client = ProFTPD::TestSuite::FTP->new('127.0.0.1', $port);
eval { $client->site('help') };
unless ($@) {
die("SITE HELP command succeeded unexpectedly");
}
my $resp_code = $client->response_code();
my $resp_msg = $client->response_msg();
my $expected = 501;
$self->assert($expected == $resp_code,
test_msg("Expected response code $expected, got $resp_code"));
$expected = 'HELP: Permission denied';
$self->assert($expected eq $resp_msg,
test_msg("Expected response message '$expected', got '$resp_msg'"));
$client->login($setup->{user}, $setup->{passwd});
eval { $client->site('help') };
unless ($@) {
die("SITE HELP command succeeded unexpectedly");
}
$resp_code = $client->response_code();
$resp_msg = $client->response_msg();
$expected = 501;
$self->assert($expected == $resp_code,
test_msg("Expected response code $expected, got $resp_code"));
$expected = 'HELP: Permission denied';
$self->assert($expected eq $resp_msg,
test_msg("Expected response message '$expected', got '$resp_msg'"));
$client->quit();
};
if ($@) {
$ex = $@;
}
$wfh->print("done\n");
$wfh->flush();
} else {
eval { server_wait($setup->{config_file}, $rfh) };
if ($@) {
warn($@);
exit 1;
}
exit 0;
}
# Stop server
server_stop($setup->{pid_file});
$self->assert_child_ok($pid);
test_cleanup($setup->{log_file}, $ex);
}
1;
| gpl-2.0 |
Val-Git/icms2 | system/controllers/activity/hooks/content_after_update_approve.php | 685 | <?php
class onActivityContentAfterUpdateApprove extends cmsAction {
public function run($data){
$ctype_name = $data['ctype_name'];
$item = $data['item'];
        // update the entry in the activity feed
$this->updateEntry('content', "add.{$ctype_name}", $item['id'], array(
'subject_title' => $item['title'],
'subject_id' => $item['id'],
'subject_url' => href_to_rel($ctype_name, $item['slug'] . '.html'),
'is_private' => isset($item['is_private']) ? $item['is_private'] : 0,
'is_pub' => $item['is_pub']
));
return $data;
}
}
| gpl-2.0 |
acourreges/minicopier | src/minicopier/gui/FailedPanel.java | 2996 | /*
FailedPanel.java / MiniCopier
Copyright (C) 2007-2009 Adrian Courrèges
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package minicopier.gui;
import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.Dimension;
import javax.swing.*;
import javax.swing.table.DefaultTableCellRenderer;
import javax.swing.table.DefaultTableModel;
import minicopier.i18n.Language;
public class FailedPanel extends JPanel {
private MainFrame mainFrame;
protected QueueJButton retry;
protected QueueJButton removeFailed;
protected DefaultTableModel failedModel;
protected JTable failedList;
public FailedPanel(MainFrame f){
super();
this.mainFrame = f;
this.retry = new QueueJButton("img/retry.gif");
this.retry.setToolTipText(Language.get("Tooltip.Failed.Retry"));
this.removeFailed = new QueueJButton("img/delete.gif");
this.removeFailed.setToolTipText(Language.get("Tooltip.Failed.Clear"));
failedModel = mainFrame.copier.failedItems.getTableModel();
failedList = new JTable(failedModel);
//transferList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
failedList.getColumnModel().getColumn(1).setCellRenderer(new RightTableCellRenderer());
failedList.getColumnModel().getColumn(1).setMaxWidth(80);
failedList.getColumnModel().getColumn(1).setMinWidth(80);
this.setLayout(new BorderLayout());
JScrollPane jspFailed = new JScrollPane(failedList);
jspFailed.setViewportView(failedList);
jspFailed.setPreferredSize(new Dimension(40,40));
//transferList.setAutoResizeMode(JTable.AUTO_RESIZE_LAST_COLUMN);
//jspTransfer.setHorizontalScrollBarPolicy(ScrollPaneConstants.HORIZONTAL_SCROLLBAR_ALWAYS);
//Subpanel (west) with queue management buttons
JPanel queueButtonsPanel = new JPanel();
queueButtonsPanel.setLayout(new BoxLayout(queueButtonsPanel, BoxLayout.Y_AXIS));
queueButtonsPanel.setAlignmentX(Component.CENTER_ALIGNMENT);
queueButtonsPanel.add(this.retry,Component.CENTER_ALIGNMENT);
queueButtonsPanel.add(this.removeFailed);
this.add(queueButtonsPanel,BorderLayout.WEST);
this.add(jspFailed,BorderLayout.CENTER);
}
private class RightTableCellRenderer extends DefaultTableCellRenderer {
public RightTableCellRenderer() {
setHorizontalAlignment(RIGHT);
setVerticalAlignment(CENTER);
}
}
}
| gpl-2.0 |
sdtabilit/Scada-LTS | src/com/serotonin/mango/vo/dataSource/jmx/JmxDataSourceVO.java | 7962 | /*
Mango - Open Source M2M - http://mango.serotoninsoftware.com
Copyright (C) 2006-2011 Serotonin Software Technologies Inc.
@author Matthew Lohbihler
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.serotonin.mango.vo.dataSource.jmx;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.List;
import java.util.Map;
import com.serotonin.json.JsonException;
import com.serotonin.json.JsonObject;
import com.serotonin.json.JsonReader;
import com.serotonin.json.JsonRemoteEntity;
import com.serotonin.json.JsonRemoteProperty;
import com.serotonin.mango.Common;
import com.serotonin.mango.rt.dataSource.DataSourceRT;
import com.serotonin.mango.rt.dataSource.jmx.JmxDataSourceRT;
import com.serotonin.mango.rt.event.AlarmLevels;
import com.serotonin.mango.rt.event.type.AuditEventType;
import com.serotonin.mango.rt.event.type.EventType;
import com.serotonin.mango.util.ExportCodes;
import com.serotonin.mango.vo.dataSource.DataSourceVO;
import com.serotonin.mango.vo.event.EventTypeVO;
import com.serotonin.util.SerializationHelper;
import com.serotonin.util.StringUtils;
import com.serotonin.web.dwr.DwrResponseI18n;
import com.serotonin.web.i18n.LocalizableMessage;
/**
* @author Matthew Lohbihler
*/
@JsonRemoteEntity
public class JmxDataSourceVO extends DataSourceVO<JmxDataSourceVO> {
public static final Type TYPE = Type.JMX;
@Override
protected void addEventTypes(List<EventTypeVO> ets) {
ets.add(createEventType(JmxDataSourceRT.DATA_SOURCE_EXCEPTION_EVENT, new LocalizableMessage(
"event.ds.dataSource"), EventType.DuplicateHandling.IGNORE_SAME_MESSAGE, AlarmLevels.URGENT));
ets.add(createEventType(JmxDataSourceRT.POINT_READ_EXCEPTION_EVENT,
new LocalizableMessage("event.ds.pointRead")));
ets.add(createEventType(JmxDataSourceRT.POINT_WRITE_EXCEPTION_EVENT, new LocalizableMessage(
"event.ds.pointWrite")));
}
private static final ExportCodes EVENT_CODES = new ExportCodes();
static {
EVENT_CODES.addElement(JmxDataSourceRT.DATA_SOURCE_EXCEPTION_EVENT, "DATA_SOURCE_EXCEPTION");
EVENT_CODES.addElement(JmxDataSourceRT.POINT_READ_EXCEPTION_EVENT, "POINT_READ_EXCEPTION");
EVENT_CODES.addElement(JmxDataSourceRT.POINT_WRITE_EXCEPTION_EVENT, "POINT_WRITE_EXCEPTION");
}
@Override
public ExportCodes getEventCodes() {
return EVENT_CODES;
}
@Override
public Type getType() {
return TYPE;
}
@Override
public LocalizableMessage getConnectionDescription() {
if (useLocalServer)
return new LocalizableMessage("dsEdit.jmx.dsconn.local");
return new LocalizableMessage("dsEdit.jmx.dsconn.remote", remoteServerAddr);
}
@Override
public DataSourceRT createDataSourceRT() {
return new JmxDataSourceRT(this);
}
@Override
public JmxPointLocatorVO createPointLocator() {
return new JmxPointLocatorVO();
}
@JsonRemoteProperty
private boolean useLocalServer;
@JsonRemoteProperty
private String remoteServerAddr;
private int updatePeriodType = Common.TimePeriods.MINUTES;
@JsonRemoteProperty
private int updatePeriods = 5;
@JsonRemoteProperty
private boolean quantize;
public boolean isUseLocalServer() {
return useLocalServer;
}
public void setUseLocalServer(boolean useLocalServer) {
this.useLocalServer = useLocalServer;
}
public String getRemoteServerAddr() {
return remoteServerAddr;
}
public void setRemoteServerAddr(String remoteServerAddr) {
this.remoteServerAddr = remoteServerAddr;
}
public int getUpdatePeriodType() {
return updatePeriodType;
}
public void setUpdatePeriodType(int updatePeriodType) {
this.updatePeriodType = updatePeriodType;
}
public int getUpdatePeriods() {
return updatePeriods;
}
public void setUpdatePeriods(int updatePeriods) {
this.updatePeriods = updatePeriods;
}
public boolean isQuantize() {
return quantize;
}
public void setQuantize(boolean quantize) {
this.quantize = quantize;
}
@Override
public void validate(DwrResponseI18n response) {
super.validate(response);
if (!useLocalServer && StringUtils.isEmpty(remoteServerAddr))
response.addContextualMessage("remoteServerAddr", "validate.required");
if (!Common.TIME_PERIOD_CODES.isValidId(updatePeriodType))
response.addContextualMessage("updatePeriodType", "validate.invalidValue");
if (updatePeriods <= 0)
response.addContextualMessage("updatePeriods", "validate.greaterThanZero");
}
@Override
protected void addPropertiesImpl(List<LocalizableMessage> list) {
AuditEventType.addPropertyMessage(list, "dsEdit.jmx.useLocalServer", useLocalServer);
AuditEventType.addPropertyMessage(list, "dsEdit.jmx.remoteServerAddr", remoteServerAddr);
AuditEventType.addPeriodMessage(list, "dsEdit.updatePeriod", updatePeriodType, updatePeriods);
AuditEventType.addPropertyMessage(list, "dsEdit.quantize", quantize);
}
@Override
protected void addPropertyChangesImpl(List<LocalizableMessage> list, JmxDataSourceVO from) {
AuditEventType.maybeAddPropertyChangeMessage(list, "dsEdit.jmx.useLocalServer", from.useLocalServer,
useLocalServer);
AuditEventType.maybeAddPropertyChangeMessage(list, "dsEdit.jmx.remoteServerAddr", from.remoteServerAddr,
remoteServerAddr);
AuditEventType.maybeAddPeriodChangeMessage(list, "dsEdit.updatePeriod", from.updatePeriodType,
from.updatePeriods, updatePeriodType, updatePeriods);
AuditEventType.maybeAddPropertyChangeMessage(list, "dsEdit.quantize", from.quantize, quantize);
}
//
//
// Serialization
//
private static final long serialVersionUID = -1;
private static final int version = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(version);
out.writeBoolean(useLocalServer);
SerializationHelper.writeSafeUTF(out, remoteServerAddr);
out.writeInt(updatePeriodType);
out.writeInt(updatePeriods);
out.writeBoolean(quantize);
}
private void readObject(ObjectInputStream in) throws IOException {
int ver = in.readInt();
// Switch on the version of the class so that version changes can be elegantly handled.
if (ver == 1) {
useLocalServer = in.readBoolean();
remoteServerAddr = SerializationHelper.readSafeUTF(in);
updatePeriodType = in.readInt();
updatePeriods = in.readInt();
quantize = in.readBoolean();
}
}
@Override
public void jsonDeserialize(JsonReader reader, JsonObject json) throws JsonException {
super.jsonDeserialize(reader, json);
Integer value = deserializeUpdatePeriodType(json);
if (value != null)
updatePeriodType = value;
}
@Override
public void jsonSerialize(Map<String, Object> map) {
super.jsonSerialize(map);
serializeUpdatePeriodType(map, updatePeriodType);
}
}
| gpl-2.0 |
TheTypoMaster/calligra-history | kexi/widget/kexicustompropertyfactory_p.cpp | 3633 | /* This file is part of the KDE project
Copyright (C) 2005 Jarosław Staniek <[email protected]>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "kexicustompropertyfactory_p.h"
#include <qlineedit.h>
#include <kdebug.h>
#include <koproperty/Property.h>
#include <kexiutils/identifier.h>
#if 0 //TODO
KexiImagePropertyEdit::KexiImagePropertyEdit(
KoProperty::Property *property, QWidget *parent)
: KoProperty::PixmapEdit(property, parent)
, m_id(0)
{
}
KexiImagePropertyEdit::~KexiImagePropertyEdit()
{
}
void KexiImagePropertyEdit::selectPixmap()
{
QString fileName(KoProperty::PixmapEdit::selectPixmapFileName());
if (fileName.isEmpty())
return;
KexiBLOBBuffer::Handle h(KexiBLOBBuffer::self()->insertPixmap(KUrl(fileName)));
setValue((uint)/*! @todo unsafe*/h.id());
#if 0 //will be reenabled for new image collection
if (!m_manager->activeForm() || !property())
return;
ObjectTreeItem *item = m_manager->activeForm()->objectTree()->lookup(m_manager->activeForm()->selectedWidget()->name());
QString name = item ? item->pixmapName(property()->name()) : "";
PixmapCollectionChooser dialog(m_manager->activeForm()->pixmapCollection(), name, topLevelWidget());
if (dialog.exec() == QDialog::Accepted) {
setValue(dialog.pixmap(), true);
item->setPixmapName(property()->name(), dialog.pixmapName());
}
#endif
}
QVariant KexiImagePropertyEdit::value() const
{
return (uint)/*! @todo unsafe*/m_id;
}
void KexiImagePropertyEdit::setValue(const QVariant &value, bool emitChange)
{
m_id = value.toInt();
PixmapEdit::setValue(KexiBLOBBuffer::self()->objectForId(m_id).pixmap(), emitChange);
}
void KexiImagePropertyEdit::drawViewer(QPainter *p, const QColorGroup &cg, const QRect &r,
const QVariant &value)
{
KexiBLOBBuffer::Handle h(KexiBLOBBuffer::self()->objectForId(value.toInt()));
PixmapEdit::drawViewer(p, cg, r, h.pixmap());
}
#endif
//----------------------------------------------------------------
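/* Line edit for identifier properties: a KexiUtils::IdentifierValidator is installed on the
   editor to restrict input, and setValue() converts any non-identifier string with
   KexiUtils::string2Identifier() before storing it; empty values are rejected. */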
KexiIdentifierPropertyEdit::KexiIdentifierPropertyEdit(QWidget *parent)
: KoProperty::StringEdit(parent)
{
KexiUtils::IdentifierValidator *val = new KexiUtils::IdentifierValidator(this);
setValidator(val);
val->setObjectName("KexiIdentifierPropertyEdit Validator");
}
KexiIdentifierPropertyEdit::~KexiIdentifierPropertyEdit()
{
}
void KexiIdentifierPropertyEdit::setValue(const QString &value)
{
if (value.isEmpty()) {
kWarning() << "Value cannot be empty. This call has no effect.";
return;
}
const QString identifier(KexiUtils::string2Identifier(value));
if (identifier != value)
kDebug() << QString("String \"%1\" converted to identifier \"%2\".").arg(value).arg(identifier);
KoProperty::StringEdit::setValue(identifier);
}
#include "kexicustompropertyfactory_p.moc"
| gpl-2.0 |
hsfoxman/198362 | wp-content/themes/snapshot/single.php | 2381 | <?php get_header(); the_post(); ?>
<div id="page-title" class="post-title">
<div class="container">
<div class="post-info">
<div class="date">
<em></em>
<a href="<?php the_permalink() ?>"><?php print get_the_date() ?></a>
</div>
<div class="comments">
<em></em>
<a href="#comments"><?php comments_number( __('No Comments', 'snapshot'), __('One Comment', 'snapshot'), __('% Comments', 'snapshot') ); ?></a>
</div>
<?php $category = get_the_category(); if(!empty($category)) : ?>
<div class="category">
<em></em>
<?php the_category(', '); ?>
</div>
<?php endif ?>
</div>
<h1>
<?php the_title() ?>
</h1>
<div class="nav">
<?php previous_post_link('%link') ?>
<?php next_post_link('%link') ?>
</div>
</div>
</div>
<?php get_template_part('viewer') ?>
<div id="post-<?php the_ID() ?>" <?php post_class() ?>>
<div class="container">
<div id="post-share">
<?php if(so_setting('social_display_share')) get_template_part('share') ?>
</div>
<div id="post-main">
<div class="entry-content">
<?php the_content() ?>
<?php global $numpages; if(!empty($numpages) || get_the_tag_list() != '') : ?>
<div class="clear"></div>
<?php endif; ?>
<?php wp_link_pages() ?>
<?php the_tags() ?>
</div>
<div class="clear"></div>
<div id="single-comments-wrapper">
<?php comments_template() ?>
</div>
</div>
<div id="post-images">
<?php
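			// Collect this post's image attachments in menu order, skip any flagged with the
			// 'sidebar_exclude' meta key, and render each as a link to its attachment page
			// carrying the 'single-large' dimensions as data attributes.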
$children = get_children(array(
'post_mime_type' => 'image',
'post_parent' => get_the_ID(),
'post_type' => 'attachment',
'post_status' => 'inherit',
'post_mime_type' => 'image',
'order' => 'ASC',
'orderby' => 'menu_order ID'
));
foreach($children as $child){
$exclude = get_post_meta($child->ID, 'sidebar_exclude', true);
if(!empty($exclude)) continue;
$src = wp_get_attachment_image_src($child->ID, 'single-large');
?>
<div class="image">
<?php print '<a href="'.get_attachment_link($child->ID).'" data-width="'.$src[1].'" data-height="'.$src[2].'">' ?>
<?php print wp_get_attachment_image($child->ID, 'post-thumbnail', false, array('class' => 'thumbnail')); ?>
<?php print '</a>' ?>
</div>
<?php
}
?>
</div>
</div>
<div class="clear"></div>
</div>
<?php get_footer() ?>
| gpl-2.0 |
futranbg/ef65l-kernel-2.0 | drivers/video/msm/msm_fb.c | 104415 | /* drivers/video/msm/msm_fb.c
*
* Core MSM framebuffer driver.
*
* Copyright (C) 2007 Google Incorporated
* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <mach/board.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/console.h>
#include <linux/android_pmem.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#define MSM_FB_C
#include "msm_fb.h"
#include "mddihosti.h"
#include "tvenc.h"
#include "mdp.h"
#include "mdp4.h"
#ifdef CONFIG_PANTECH_MHL_SUSPEND_RESUME
extern void MHL_On(bool on);
extern void sii9244_cfg_power_init(void);
extern int mhl_power_ctrl(int on);
extern void MHL_En_Control(bool on);
#endif
#ifdef CONFIG_SW_RESET
#include "../../../arch/arm/mach-msm/sky_sys_reset.h"
#endif
#ifdef CONFIG_FB_MSM_LOGO
#define INIT_IMAGE_FILE "/initlogo.rle"
extern int load_565rle_image(char *filename, bool bf_supported);
#endif
#if defined(CONFIG_SKY_SMB136S_CHARGER) || defined(CONFIG_SKY_SMB137B_CHARGER)
#define BATTERY_IMAGE_FILE "/logo2.rle"
extern int sky_charging_status(void);
//pz1946 20110920 offline charging
int offline_charging_status = 0;
extern void gpio_set_132_trickle_leakeage(void);
#endif
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MSM_FB_NUM 3
#endif
#ifdef CONFIG_SW_RESET
#define REBOOT_IMAGE_FILE "/reboot.rle"
#endif
#if defined(TARGET_BUILD_USER)
static int is_blind_reset = 0;
#endif
/* Idle wakelock to prevent PC between wake up and Vsync */
struct wake_lock mdp_idle_wakelock;
static unsigned char *fbram;
static unsigned char *fbram_phys;
static int fbram_size;
static boolean bf_supported;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
int vsync_mode = 1;
#define MAX_BLIT_REQ 256
#define MAX_FBI_LIST 32
static struct fb_info *fbi_list[MAX_FBI_LIST];
static int fbi_list_index;
static struct msm_fb_data_type *mfd_list[MAX_FBI_LIST];
static int mfd_list_index;
static u32 msm_fb_pseudo_palette[16] = {
0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
};
static struct ion_client *iclient;
u32 msm_fb_debug_enabled;
/* Setting msm_fb_msg_level to 8 prints out ALL messages */
u32 msm_fb_msg_level = 7;
/* Setting mddi_msg_level to 8 prints out ALL messages */
u32 mddi_msg_level = 5;
#define PZ1759_SW1_BACKLIGHT_ON_WORKAROUND //[BIH] PZ1759_SW1_BACKLIGHT_ON_WORKAROUND -
//Qualcomm-suggested patch for the backlight-on skip problem. QSR1000632
#ifdef PZ1759_SW1_BACKLIGHT_ON_WORKAROUND
int FrameUpdatedAfterResume =0;
#endif
extern int32 mdp_block_power_cnt[MDP_MAX_BLOCK];
extern unsigned long mdp_timer_duration;
static int msm_fb_register(struct msm_fb_data_type *mfd);
static int msm_fb_open(struct fb_info *info, int user);
static int msm_fb_release(struct fb_info *info, int user);
static int msm_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd);
int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd);
static int msm_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int msm_fb_set_par(struct fb_info *info);
static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
boolean op_enable);
static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd);
static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma);
#if defined(CONFIG_F_SKYDISP_LCD_FORCE_ONOFF)
static int msm_fb_blank_sub_force(int blank_mode, struct fb_info *info, int bl);
#endif
static void msm_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
#ifdef MSM_FB_ENABLE_DBGFS
#define MSM_FB_MAX_DBGFS 1024
#define MAX_BACKLIGHT_BRIGHTNESS 255
int msm_fb_debugfs_file_index;
struct dentry *msm_fb_debugfs_root;
struct dentry *msm_fb_debugfs_file[MSM_FB_MAX_DBGFS];
DEFINE_MUTEX(msm_fb_notify_update_sem);
void msmfb_no_update_notify_timer_cb(unsigned long data)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
if (!mfd)
pr_err("%s mfd NULL\n", __func__);
complete(&mfd->msmfb_no_update_notify);
}
struct dentry *msm_fb_get_debugfs_root(void)
{
if (msm_fb_debugfs_root == NULL)
msm_fb_debugfs_root = debugfs_create_dir("msm_fb", NULL);
return msm_fb_debugfs_root;
}
void msm_fb_debugfs_file_create(struct dentry *root, const char *name,
u32 *var)
{
if (msm_fb_debugfs_file_index >= MSM_FB_MAX_DBGFS)
return;
msm_fb_debugfs_file[msm_fb_debugfs_file_index++] =
debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var);
}
#endif
#ifdef CONFIG_F_SKYDISP_NO_CURSOR_IN_BOOT
int msm_fb_cursor_dummy(struct fb_info *info, struct fb_cursor *cursor)
{
return 0;
}
#endif
int msm_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (!mfd->cursor_update)
return -ENODEV;
return mfd->cursor_update(info, cursor);
}
static int msm_fb_resource_initialized;
#ifndef CONFIG_FB_BACKLIGHT
static int lcd_backlight_registered;
static void msm_fb_set_bl_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
int bl_lvl;
if (value > MAX_BACKLIGHT_BRIGHTNESS)
value = MAX_BACKLIGHT_BRIGHTNESS;
/* This maps android backlight level 0 to 255 into
driver backlight level 0 to bl_max with rounding */
bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
/(2 * MAX_BACKLIGHT_BRIGHTNESS);
if (!bl_lvl && value)
bl_lvl = 1;
msm_fb_set_backlight(mfd, bl_lvl);
}
static struct led_classdev backlight_led = {
.name = "lcd-backlight",
.brightness = MAX_BACKLIGHT_BRIGHTNESS,
.brightness_set = msm_fb_set_bl_brightness,
};
#endif
static struct msm_fb_platform_data *msm_fb_pdata;
unsigned char hdmi_prim_display;
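/*
 * Match a candidate panel driver name against the primary/external panel
 * names supplied in the msm_fb platform data. Returns 0 when the driver for
 * "name" should register; hdmi_prim_display is set when HDMI is the primary
 * panel. Falls back to the platform detect_client() callback when no name
 * matches.
 */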
int msm_fb_detect_client(const char *name)
{
int ret = 0;
u32 len;
#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
u32 id;
#endif
if (!msm_fb_pdata)
return -EPERM;
len = strnlen(name, PANEL_NAME_MAX_LEN);
if (strnlen(msm_fb_pdata->prim_panel_name, PANEL_NAME_MAX_LEN)) {
pr_err("\n name = %s, prim_display = %s",
name, msm_fb_pdata->prim_panel_name);
if (!strncmp((char *)msm_fb_pdata->prim_panel_name,
name, len)) {
if (!strncmp((char *)msm_fb_pdata->prim_panel_name,
"hdmi_msm", len))
hdmi_prim_display = 1;
return 0;
} else {
ret = -EPERM;
}
}
if (strnlen(msm_fb_pdata->ext_panel_name, PANEL_NAME_MAX_LEN)) {
pr_err("\n name = %s, ext_display = %s",
name, msm_fb_pdata->ext_panel_name);
if (!strncmp((char *)msm_fb_pdata->ext_panel_name, name, len))
return 0;
else
ret = -EPERM;
}
if (ret)
return ret;
ret = -EPERM;
if (msm_fb_pdata && msm_fb_pdata->detect_client) {
ret = msm_fb_pdata->detect_client(name);
		/* if it's a non-MDDI panel, we need to pre-scan the
		   MDDI client to see if we can disable the MDDI host */
#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
if (!ret && msm_fb_pdata->mddi_prescan)
id = mddi_get_client_id();
#endif
}
return ret;
}
static ssize_t msm_fb_msm_fb_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret = 0;
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct msm_fb_panel_data *pdata =
(struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
switch (pdata->panel_info.type) {
case NO_PANEL:
ret = snprintf(buf, PAGE_SIZE, "no panel\n");
break;
case MDDI_PANEL:
ret = snprintf(buf, PAGE_SIZE, "mddi panel\n");
break;
case EBI2_PANEL:
ret = snprintf(buf, PAGE_SIZE, "ebi2 panel\n");
break;
case LCDC_PANEL:
ret = snprintf(buf, PAGE_SIZE, "lcdc panel\n");
break;
case EXT_MDDI_PANEL:
ret = snprintf(buf, PAGE_SIZE, "ext mddi panel\n");
break;
case TV_PANEL:
ret = snprintf(buf, PAGE_SIZE, "tv panel\n");
break;
case HDMI_PANEL:
ret = snprintf(buf, PAGE_SIZE, "hdmi panel\n");
break;
case DTV_PANEL:
ret = snprintf(buf, PAGE_SIZE, "dtv panel\n");
break;
case MIPI_VIDEO_PANEL:
ret = snprintf(buf, PAGE_SIZE, "mipi dsi video panel\n");
break;
case MIPI_CMD_PANEL:
ret = snprintf(buf, PAGE_SIZE, "mipi dsi cmd panel\n");
break;
default:
ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
break;
}
return ret;
}
static DEVICE_ATTR(msm_fb_type, S_IRUGO, msm_fb_msm_fb_type, NULL);
static struct attribute *msm_fb_attrs[] = {
&dev_attr_msm_fb_type.attr,
NULL,
};
static struct attribute_group msm_fb_attr_group = {
.attrs = msm_fb_attrs,
};
static int msm_fb_create_sysfs(struct platform_device *pdev)
{
int rc;
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
rc = sysfs_create_group(&mfd->fbi->dev->kobj, &msm_fb_attr_group);
if (rc)
MSM_FB_ERR("%s: sysfs group creation failed, rc=%d\n", __func__,
rc);
return rc;
}
static void msm_fb_remove_sysfs(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
sysfs_remove_group(&mfd->fbi->dev->kobj, &msm_fb_attr_group);
}
static int msm_fb_probe(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
int rc;
int err = 0;
MSM_FB_DEBUG("msm_fb_probe\n");
if ((pdev->id == 0) && (pdev->num_resources > 0)) {
msm_fb_pdata = pdev->dev.platform_data;
fbram_size =
pdev->resource[0].end - pdev->resource[0].start + 1;
fbram_phys = (char *)pdev->resource[0].start;
fbram = __va(fbram_phys);
if (!fbram) {
printk(KERN_ERR "fbram ioremap failed!\n");
return -ENOMEM;
}
MSM_FB_DEBUG("msm_fb_probe: phy_Addr = 0x%x virt = 0x%x\n",
(int)fbram_phys, (int)fbram);
iclient = msm_ion_client_create(-1, pdev->name);
if (IS_ERR_OR_NULL(iclient)) {
pr_err("msm_ion_client_create() return"
" error, val %p\n", iclient);
iclient = NULL;
}
msm_fb_resource_initialized = 1;
return 0;
}
if (!msm_fb_resource_initialized)
return -EPERM;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
return -ENOMEM;
mfd->panel_info.frame_count = 0;
#if defined(CONFIG_MACH_MSM8X60_EF65L)
mfd->bl_level = 10;
#else
mfd->bl_level = 0;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
mfd->overlay_play_enable = 1;
#endif
bf_supported = mdp4_overlay_borderfill_supported();
rc = msm_fb_register(mfd);
if (rc)
return rc;
err = pm_runtime_set_active(mfd->fbi->dev);
if (err < 0)
printk(KERN_ERR "pm_runtime: fail to set active.\n");
pm_runtime_enable(mfd->fbi->dev);
#ifdef CONFIG_FB_BACKLIGHT
msm_fb_config_backlight(mfd);
#else
/* android supports only one lcd-backlight/lcd for now */
if (!lcd_backlight_registered) {
if (led_classdev_register(&pdev->dev, &backlight_led))
printk(KERN_ERR "led_classdev_register failed\n");
else
lcd_backlight_registered = 1;
}
#endif
pdev_list[pdev_list_cnt++] = pdev;
msm_fb_create_sysfs(pdev);
return 0;
}
static int msm_fb_remove(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
MSM_FB_DEBUG("msm_fb_remove\n");
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
msm_fb_remove_sysfs(pdev);
pm_runtime_disable(mfd->fbi->dev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
if (msm_fb_suspend_sub(mfd))
printk(KERN_ERR "msm_fb_remove: can't stop the device %d\n", mfd->index);
if (mfd->channel_irq != 0)
free_irq(mfd->channel_irq, (void *)mfd);
if (mfd->vsync_width_boundary)
vfree(mfd->vsync_width_boundary);
if (mfd->vsync_resync_timer.function)
del_timer(&mfd->vsync_resync_timer);
if (mfd->refresh_timer.function)
del_timer(&mfd->refresh_timer);
if (mfd->dma_hrtimer.function)
hrtimer_cancel(&mfd->dma_hrtimer);
if (mfd->msmfb_no_update_notify_timer.function)
del_timer(&mfd->msmfb_no_update_notify_timer);
complete(&mfd->msmfb_no_update_notify);
complete(&mfd->msmfb_update_notify);
/* Do this only for the primary panel */
if (mfd->fbi->node == 0)
wake_lock_destroy(&mdp_idle_wakelock);
/* remove /dev/fb* */
unregister_framebuffer(mfd->fbi);
#ifdef CONFIG_FB_BACKLIGHT
/* remove /sys/class/backlight */
backlight_device_unregister(mfd->fbi->bl_dev);
#else
if (lcd_backlight_registered) {
lcd_backlight_registered = 0;
led_classdev_unregister(&backlight_led);
}
#endif
#ifdef MSM_FB_ENABLE_DBGFS
if (mfd->sub_dir)
debugfs_remove(mfd->sub_dir);
#endif
return 0;
}
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
{
struct msm_fb_data_type *mfd;
int ret = 0;
MSM_FB_DEBUG("msm_fb_suspend\n");
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
console_lock();
fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
ret = msm_fb_suspend_sub(mfd);
if (ret != 0) {
printk(KERN_ERR "msm_fb: failed to suspend! %d\n", ret);
fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
} else {
pdev->dev.power.power_state = state;
}
console_unlock();
return ret;
}
#else
#define msm_fb_suspend NULL
#endif
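/*
 * Common suspend path: blank the panel, power down the MDP master block and
 * either detach the display channel irq or wait for a pending vsync handler
 * to finish. For HDMI/DTV panels the MHL power is switched off as well
 * (mhl_power_ctrl) when CONFIG_PANTECH_MHL_SUSPEND_RESUME is set.
 */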
static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
if (mfd->msmfb_no_update_notify_timer.function)
del_timer(&mfd->msmfb_no_update_notify_timer);
complete(&mfd->msmfb_no_update_notify);
/*
* suspend this channel
*/
mfd->suspend.sw_refreshing_enable = mfd->sw_refreshing_enable;
mfd->suspend.op_enable = mfd->op_enable;
mfd->suspend.panel_power_on = mfd->panel_power_on;
if (mfd->op_enable) {
ret =
msm_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
mfd->suspend.op_enable);
if (ret) {
MSM_FB_INFO
("msm_fb_suspend: can't turn off display!\n");
return ret;
}
mfd->op_enable = FALSE;
}
/*
* try to power down
*/
mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
/*
* detach display channel irq if there's any
* or wait until vsync-resync completes
*/
if ((mfd->dest == DISPLAY_LCD)) {
if (mfd->panel_info.lcd.vsync_enable) {
if (mfd->panel_info.lcd.hw_vsync_mode) {
if (mfd->channel_irq != 0)
disable_irq(mfd->channel_irq);
} else {
volatile boolean vh_pending;
do {
vh_pending = mfd->vsync_handler_pending;
} while (vh_pending);
}
}
}
#ifdef CONFIG_PANTECH_MHL_SUSPEND_RESUME
else if (mfd->panel_info.type == HDMI_PANEL ||
mfd->panel_info.type == DTV_PANEL)
{
// MHL_On(0);
// msleep(10);
mhl_power_ctrl(0);
MSM_FB_ERR("[SKY_MHL]%s MHL power off\n",__func__);
}
#endif
return 0;
}
#ifdef CONFIG_PM
static int msm_fb_resume_sub(struct msm_fb_data_type *mfd)
{
int ret = 0;
struct msm_fb_panel_data *pdata = NULL;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
/* attach display channel irq if there's any */
if (mfd->channel_irq != 0)
enable_irq(mfd->channel_irq);
/* resume state var recover */
mfd->sw_refreshing_enable = mfd->suspend.sw_refreshing_enable;
mfd->op_enable = mfd->suspend.op_enable;
if (mfd->suspend.panel_power_on) {
#ifdef CONFIG_PANTECH_MHL_SUSPEND_RESUME
if (mfd->panel_info.type == HDMI_PANEL ||
mfd->panel_info.type == DTV_PANEL)
{
mhl_power_ctrl(1);
// msleep(10);
// MHL_On(1);
// sii9244_cfg_power_init();
// MSM_FB_ERR("[SKY_MHL]%s MHL power on \n",__func__);
}
#endif
ret =
msm_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
mfd->op_enable);
if (ret)
MSM_FB_INFO("msm_fb_resume: can't turn on display!\n");
} else {
if (pdata->power_ctrl)
pdata->power_ctrl(TRUE);
}
return ret;
}
#endif
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int msm_fb_resume(struct platform_device *pdev)
{
	/* This resume function is called while interrupts are enabled.
*/
int ret = 0;
struct msm_fb_data_type *mfd;
MSM_FB_DEBUG("msm_fb_resume\n");
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
console_lock();
ret = msm_fb_resume_sub(mfd);
pdev->dev.power.power_state = PMSG_ON;
fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
#ifdef PZ1759_SW1_BACKLIGHT_ON_WORKAROUND
FrameUpdatedAfterResume =0;
#endif
console_unlock();
return ret;
}
#else
#define msm_fb_resume NULL
#endif
static int msm_fb_runtime_suspend(struct device *dev)
{
dev_dbg(dev, "pm_runtime: suspending...\n");
return 0;
}
static int msm_fb_runtime_resume(struct device *dev)
{
dev_dbg(dev, "pm_runtime: resuming...\n");
return 0;
}
static int msm_fb_runtime_idle(struct device *dev)
{
dev_dbg(dev, "pm_runtime: idling...\n");
return 0;
}
static int msm_fb_ext_suspend(struct device *dev)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
int ret = 0;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
if (mfd->panel_info.type == HDMI_PANEL ||
mfd->panel_info.type == DTV_PANEL)
ret = msm_fb_suspend_sub(mfd);
return ret;
}
static int msm_fb_ext_resume(struct device *dev)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
int ret = 0;
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
if (mfd->panel_info.type == HDMI_PANEL ||
mfd->panel_info.type == DTV_PANEL)
ret = msm_fb_resume_sub(mfd);
return ret;
}
static struct dev_pm_ops msm_fb_dev_pm_ops = {
.runtime_suspend = msm_fb_runtime_suspend,
.runtime_resume = msm_fb_runtime_resume,
.runtime_idle = msm_fb_runtime_idle,
.suspend = msm_fb_ext_suspend,
.resume = msm_fb_ext_resume,
};
static struct platform_driver msm_fb_driver = {
.probe = msm_fb_probe,
.remove = msm_fb_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = msm_fb_suspend,
.resume = msm_fb_resume,
#endif
.shutdown = NULL,
.driver = {
/* Driver name must match the device name added in platform.c. */
.name = "msm_fb",
.pm = &msm_fb_dev_pm_ops,
},
};
/* LS2 Do: the layer1 feature CONFIG_PANTECH_HDMI_LOOKS_BLACK_OFF is added below to make the frame appear as a black screen on the HDMI TV */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_FB_MSM_MDP303)
static void memset32_io(u32 __iomem *_ptr, u32 val, size_t count)
{
count >>= 2;
while (count--)
writel(val, _ptr++);
}
#endif
static void msm_fb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
#ifdef CONFIG_HAS_EARLYSUSPEND
static void msmfb_early_suspend(struct early_suspend *h)
{
struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
early_suspend);
	/* LS2 Do: the layer1 feature CONFIG_PANTECH_HDMI_LOOKS_BLACK_OFF is added below to make the frame appear as a black screen on the HDMI TV */
#if defined(CONFIG_FB_MSM_MDP303)
/*
* For MDP with overlay, set framebuffer with black pixels
* to show black screen on HDMI.
*/
struct fb_info *fbi = mfd->fbi;
switch (mfd->fbi->var.bits_per_pixel) {
case 32:
memset32_io((void *)fbi->screen_base, 0xFF000000,
fbi->fix.smem_len);
break;
default:
memset32_io((void *)fbi->screen_base, 0x00, fbi->fix.smem_len);
break;
}
#elif defined(CONFIG_PANTECH_HDMI_LOOKS_BLACK)
struct fb_info *fbi = mfd->fbi;
struct fb_fillrect rect;
rect.dx = rect.dy = 0;
rect.width = fbi->var.xres_virtual;
rect.height = fbi->var.yres_virtual;
rect.color = 0;
rect.rop = ROP_COPY;
msm_fb_fillrect(fbi,&rect);
#endif
msm_fb_suspend_sub(mfd);
}
static void msmfb_early_resume(struct early_suspend *h)
{
struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
early_suspend);
msm_fb_resume_sub(mfd);
}
#endif
static int unset_bl_level, bl_updated;
static int bl_level_old;
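/*
 * Apply a backlight level through the panel driver. If the panel is off or no
 * frame has been pushed since resume (bl_updated == 0), the level is cached in
 * unset_bl_level and applied from the next msm_fb_pan_display() instead.
 */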
void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl)
{
struct msm_fb_panel_data *pdata;
#if defined(TARGET_BUILD_USER)
if (is_blind_reset) {
MSM_FB_ERR("[LIVED] blind reset! return!\n");
return;
}
#endif
if (!mfd->panel_power_on || !bl_updated) {
unset_bl_level = bkl_lvl;
#ifdef PZ1759_SW1_BACKLIGHT_ON_WORKAROUND
if(FrameUpdatedAfterResume&&mfd->panel_power_on)
FrameUpdatedAfterResume=0;
else
#endif
return;
} else {
unset_bl_level = 0;
}
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
if ((pdata) && (pdata->set_backlight)) {
down(&mfd->sem);
#ifdef CONFIG_F_SKYDISP_QBUG_FIX_BACKLIGHT
if (bkl_lvl != mfd->bl_level) {
#endif
mfd->bl_level = bkl_lvl;
#ifdef CONFIG_F_SKYDISP_QBUG_FIX_BACKLIGHT
if (mfd->panel_power_on)
#endif
pdata->set_backlight(mfd);
#ifdef CONFIG_F_SKYDISP_QBUG_FIX_BACKLIGHT
}
#endif
up(&mfd->sem);
}
}
#ifdef CONFIG_F_SKYDISP_QBUG_FIX_BACKLIGHT
static void msm_fb_set_backlight_old(struct msm_fb_data_type *mfd, __u32 bkl_lvl, u32 save)
{
struct msm_fb_panel_data *pdata;
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
#if defined(TARGET_BUILD_USER)
if (is_blind_reset) {
MSM_FB_ERR("[LIVED] blind reset! return!\n");
return;
}
#endif
if ((pdata) && (pdata->set_backlight)) {
down(&mfd->sem);
if ((bkl_lvl != mfd->bl_level) || (!save)) {
bl_level_old = mfd->bl_level;
mfd->bl_level = bkl_lvl;
//if (mfd->panel_power_on)
pdata->set_backlight(mfd);
if (!save)
mfd->bl_level = bl_level_old;
}
up(&mfd->sem);
}
}
#endif
#ifdef CONFIG_SW_RESET
void msm_reset_set_bl(int bl);
#endif
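/*
 * Central panel power switch. FB_BLANK_UNBLANK powers the panel on through
 * pdata->on() and (where configured) restores the saved backlight level; every
 * other blank mode powers the panel off and clears bl_updated so the backlight
 * is re-applied on the next frame. With CONFIG_SW_RESET the new backlight
 * state is also recorded via msm_reset_set_bl().
 */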
static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
boolean op_enable)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msm_fb_panel_data *pdata = NULL;
int ret = 0;
if (!op_enable)
return -EPERM;
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
if ((!pdata) || (!pdata->on) || (!pdata->off)) {
printk(KERN_ERR "msm_fb_blank_sub: no panel operation detected!\n");
return -ENODEV;
}
switch (blank_mode) {
case FB_BLANK_UNBLANK:
if (!mfd->panel_power_on) {
msleep(16);
ret = pdata->on(mfd->pdev);
if (ret == 0) {
mfd->panel_power_on = TRUE;
#ifdef CONFIG_F_SKYDISP_QBUG_FIX_BACKLIGHT
msm_fb_set_backlight_old(mfd, mfd->bl_level, 0);
#endif
#ifdef CONFIG_SW_RESET
msm_reset_set_bl(1);
#endif
/* ToDo: possible conflict with android which doesn't expect sw refresher */
/*
if (!mfd->hw_refresh)
{
if ((ret = msm_fb_resume_sw_refresher(mfd)) != 0)
{
MSM_FB_INFO("msm_fb_blank_sub: msm_fb_resume_sw_refresher failed = %d!\n",ret);
}
}
*/
}
}
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_NORMAL:
case FB_BLANK_POWERDOWN:
default:
if (mfd->panel_power_on) {
int curr_pwr_state;
mfd->op_enable = FALSE;
curr_pwr_state = mfd->panel_power_on;
mfd->panel_power_on = FALSE;
bl_updated = 0;
msleep(16);
ret = pdata->off(mfd->pdev);
#ifdef CONFIG_SW_RESET
msm_reset_set_bl(0);
#endif
if (ret)
mfd->panel_power_on = curr_pwr_state;
mfd->op_enable = TRUE;
} else {
if (pdata->power_ctrl)
pdata->power_ctrl(FALSE);
}
#if defined(TARGET_BUILD_USER)
is_blind_reset = 0;
#endif
break;
}
return ret;
}
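/*
 * Translate the current x/y pan offsets into a byte offset into the frame
 * buffer, accounting for each page-flip buffer being padded up to a page
 * boundary (see the smem_len calculation in msm_fb_register()).
 */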
int calc_fb_offset(struct msm_fb_data_type *mfd, struct fb_info *fbi, int bpp)
{
struct msm_panel_info *panel_info = &mfd->panel_info;
int remainder, yres, offset;
if (panel_info->mode2_yres != 0) {
yres = panel_info->mode2_yres;
remainder = (fbi->fix.line_length*yres) & (PAGE_SIZE - 1);
} else {
yres = panel_info->yres;
remainder = (fbi->fix.line_length*yres) & (PAGE_SIZE - 1);
}
if (!remainder)
remainder = PAGE_SIZE;
if (fbi->var.yoffset < yres) {
offset = (fbi->var.xoffset * bpp);
/* iBuf->buf += fbi->var.xoffset * bpp + 0 *
yres * fbi->fix.line_length; */
} else if (fbi->var.yoffset >= yres && fbi->var.yoffset < 2 * yres) {
offset = (fbi->var.xoffset * bpp + yres *
fbi->fix.line_length + PAGE_SIZE - remainder);
} else {
offset = (fbi->var.xoffset * bpp + 2 * yres *
fbi->fix.line_length + 2 * (PAGE_SIZE - remainder));
}
return offset;
}
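/*
 * Software drawing hooks: each wraps the generic cfb_* helper and, for panels
 * without hardware refresh, queues a dirty-rectangle pan request by packing
 * the magic 0x54445055 ("UPDT") plus the rectangle bounds into var.reserved[].
 */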
static void msm_fb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
cfb_fillrect(info, rect);
if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
!mfd->sw_currently_refreshing) {
struct fb_var_screeninfo var;
var = info->var;
var.reserved[0] = 0x54445055;
var.reserved[1] = (rect->dy << 16) | (rect->dx);
var.reserved[2] = ((rect->dy + rect->height) << 16) |
(rect->dx + rect->width);
msm_fb_pan_display(&var, info);
}
}
static void msm_fb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
cfb_copyarea(info, area);
if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
!mfd->sw_currently_refreshing) {
struct fb_var_screeninfo var;
var = info->var;
var.reserved[0] = 0x54445055;
var.reserved[1] = (area->dy << 16) | (area->dx);
var.reserved[2] = ((area->dy + area->height) << 16) |
(area->dx + area->width);
msm_fb_pan_display(&var, info);
}
}
static void msm_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
cfb_imageblit(info, image);
if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
!mfd->sw_currently_refreshing) {
struct fb_var_screeninfo var;
var = info->var;
var.reserved[0] = 0x54445055;
var.reserved[1] = (image->dy << 16) | (image->dx);
var.reserved[2] = ((image->dy + image->height) << 16) |
(image->dx + image->width);
msm_fb_pan_display(&var, info);
}
}
static int msm_fb_blank(int blank_mode, struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
return msm_fb_blank_sub(blank_mode, info, mfd->op_enable);
}
static int msm_fb_set_lut(struct fb_cmap *cmap, struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (!mfd->lut_update)
return -ENODEV;
mfd->lut_update(info, cmap);
return 0;
}
/*
* Custom Framebuffer mmap() function for MSM driver.
* Differs from standard mmap() function by allowing for customized
* page-protection.
*/
static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma)
{
/* Get frame buffer memory range. */
unsigned long start = info->fix.smem_start;
u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (off >= len) {
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
mutex_unlock(&info->lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
/* Set VM flags. */
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO | VM_RESERVED;
/* Set VM page protection */
if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else if (mfd->mdp_fb_page_protection ==
MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE)
vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
else if (mfd->mdp_fb_page_protection ==
MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE)
vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
else if (mfd->mdp_fb_page_protection ==
MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE)
vma->vm_page_prot = pgprot_writebackwacache(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Remap the frame buffer I/O range */
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
#if defined(CONFIG_F_SKYDISP_LCD_FORCE_ONOFF)
/* just force lcd on/off, no backlight control */
static int msm_fb_blank_sub_force(int onoff, struct fb_info *info, int bl)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msm_fb_panel_data *pdata = NULL;
int ret = 0;
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
if ((!pdata) || (!pdata->on) || (!pdata->off)) {
MSM_FB_ERR("msm_fb_blank_sub_force: no panel operation detected!\n");
return -ENODEV;
}
MSM_FB_ERR("[LIVED] msm_fb_blank_sub_force: onoff=%d, mfd->panel_power_on=%d\n",
onoff, mfd->panel_power_on);
if (onoff) {
if (!mfd->panel_power_on) {
msleep(16);
ret = pdata->on(mfd->pdev);
if (ret == 0) {
mfd->panel_power_on = TRUE;
msleep(16);
if (bl == 1) {
msm_fb_set_backlight_old(mfd, mfd->bl_level, 0);
}
}
}
} else {
if (mfd->panel_power_on) {
int curr_pwr_state;
//mfd->op_enable = FALSE;
curr_pwr_state = mfd->panel_power_on;
mfd->panel_power_on = FALSE;
if (bl == 1) {
msm_fb_set_backlight_old(mfd, 0, 0);
}
msleep(16);
ret = pdata->off(mfd->pdev);
if (ret)
mfd->panel_power_on = curr_pwr_state;
//mfd->op_enable = TRUE;
}
}
return ret;
}
#endif
static struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
.fb_open = msm_fb_open,
.fb_release = msm_fb_release,
.fb_read = NULL,
.fb_write = NULL,
#ifndef CONFIG_F_SKYDISP_NO_CURSOR_IN_BOOT
.fb_cursor = NULL,
#else
.fb_cursor = msm_fb_cursor_dummy, // lived for delete cursor
#endif
.fb_check_var = msm_fb_check_var, /* vinfo check */
.fb_set_par = msm_fb_set_par, /* set the video mode according to info->var */
.fb_setcolreg = NULL, /* set color register */
.fb_blank = msm_fb_blank, /* blank display */
.fb_pan_display = msm_fb_pan_display, /* pan display */
.fb_fillrect = msm_fb_fillrect, /* Draws a rectangle */
.fb_copyarea = msm_fb_copyarea, /* Copy data from area to another */
.fb_imageblit = msm_fb_imageblit, /* Draws a image to the display */
.fb_rotate = NULL,
.fb_sync = NULL, /* wait for blit idle, optional */
.fb_ioctl = msm_fb_ioctl, /* perform fb specific ioctl (optional) */
.fb_mmap = msm_fb_mmap,
};
static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
{
/* The adreno GPU hardware requires that the pitch be aligned to
32 pixels for color buffers, so for the cases where the GPU
is writing directly to fb0, the framebuffer pitch
also needs to be 32 pixel aligned */
if (fb_index == 0)
return ALIGN(xres, 32) * bpp;
else
return xres * bpp;
}
#ifdef CONFIG_SW_RESET
#define RESTART_BL_ON 0x9A247D59
#define RESTART_BL_OFF 0x1B93214E
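/*
 * The backlight state is persisted across a software reset as one of the two
 * magic words above, stored at RESTART_REASON_ADDR + 8: msm_reset_get_bl()
 * reads it back (returning 1, 0, or -1 for unknown) and msm_reset_set_bl()
 * writes it whenever the panel is powered on or off.
 */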
int msm_reset_get_bl(void)
{
void *restart_bl;
int bl;
restart_bl = ioremap_nocache(RESTART_REASON_ADDR, 0x1000);
bl = readl(restart_bl+8);
iounmap(restart_bl);
if (bl == RESTART_BL_ON) {
bl = 1;
} else if (bl == RESTART_BL_OFF) {
bl = 0;
} else {
bl = -1;
}
MSM_FB_ERR("[LIVED] reset_get_bl=%d\n", bl);
return bl;
}
void msm_reset_set_bl(int bl)
{
void *restart_bl;
restart_bl = ioremap_nocache(RESTART_REASON_ADDR, 0x1000);
if (bl > 0) {
bl = RESTART_BL_ON;
} else {
bl = RESTART_BL_OFF;
}
writel(bl, restart_bl+8);
iounmap(restart_bl);
MSM_FB_ERR("[LIVED] reset_set_bl=%x\n", bl);
}
int msm_reset_reason_read_only(void)
{
void *restart_reason;
int reason, result;
restart_reason = ioremap_nocache(RESTART_REASON_ADDR, 0x1000);
reason = readl(restart_reason);
iounmap(restart_reason);
switch (reason)
{
case SYS_RESET_REASON_EXCEPTION:
case SYS_RESET_REASON_ASSERT:
case SYS_RESET_REASON_LINUX:
case SYS_RESET_REASON_ANDROID:
case SYS_RESET_REASON_ABNORMAL:
case SYS_RESET_REASON_MDM_EXCEPTION:
case SYS_RESET_REASON_UNKNOWN:
result = 1;
break;
default:
result = 0;
break;
}
MSM_FB_INFO("[LIVED] msm_reset_reason_read_only:reason[%x],result[%x]\n",reason,result);
return result;
}
int msm_reset_reason(void)
{
void *restart_reason;
int reason, result;
struct proc_dir_entry *reset_info;
restart_reason = ioremap_nocache(RESTART_REASON_ADDR, 0x1000);
reason = readl(restart_reason);
sky_sys_rst_set_prev_reset_info();
#if defined(CONFIG_PANTECH_PRESTO_BOARD)
reset_info = create_proc_entry("pantech_resetinfo" , \
S_IRUSR | S_IWUSR | \
S_IRGRP | S_IWGRP, NULL);
#else
reset_info = create_proc_entry("pantech_resetinfo" , \
S_IRUGO | S_IWUGO, NULL);
#endif /* CONFIG_PANTECH_PRESTO_BOARD */
if (reset_info) {
reset_info->read_proc = sky_sys_rst_read_proc_reset_info;
reset_info->write_proc = sky_sys_rst_write_proc_reset_info;
reset_info->data = NULL;
}
//writel(SYS_RESET_REASON_ABNORMAL, restart_reason);
iounmap(restart_reason);
switch (reason)
{
case SYS_RESET_REASON_EXCEPTION:
case SYS_RESET_REASON_ASSERT:
case SYS_RESET_REASON_LINUX:
case SYS_RESET_REASON_ANDROID:
case SYS_RESET_REASON_ABNORMAL:
case SYS_RESET_REASON_MDM_EXCEPTION:
case SYS_RESET_REASON_UNKNOWN:
result = 1;
break;
default:
result = 0;
break;
}
MSM_FB_ERR("[allydrop] msm_reset_reason:reason[%x],result[%x]\n",reason,result);
return result;
}
static void msm_reset_reason_clear(void)
{
void *restart_reason;
restart_reason = ioremap_nocache(RESTART_REASON_ADDR, 0x1000);
writel(SYS_RESET_REASON_ABNORMAL, restart_reason);
iounmap(restart_reason);
MSM_FB_ERR("[LIVED] msm_reset_reason_clear\n");
}
#endif
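/*
 * One-time registration of a framebuffer device: fill fb_fix/fb_var from the
 * panel info, carve this device's buffer out of the reserved fbram region,
 * register with the fb core, load the boot/charging/reboot logo on fb0 where
 * the logo options are configured, and hook up early-suspend and debugfs
 * entries.
 */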
static int msm_fb_register(struct msm_fb_data_type *mfd)
{
int ret = -ENODEV;
int bpp;
struct msm_panel_info *panel_info = &mfd->panel_info;
struct fb_info *fbi = mfd->fbi;
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
int *id;
int fbram_offset;
int remainder, remainder_mode2;
static int subsys_id[2] = {MSM_SUBSYSTEM_DISPLAY,
MSM_SUBSYSTEM_ROTATOR};
unsigned int flags = MSM_SUBSYSTEM_MAP_IOVA;
/*
* fb info initialization
*/
fix = &fbi->fix;
var = &fbi->var;
fix->type_aux = 0; /* if type == FB_TYPE_INTERLEAVED_PLANES */
fix->visual = FB_VISUAL_TRUECOLOR; /* True Color */
fix->ywrapstep = 0; /* No support */
fix->mmio_start = 0; /* No MMIO Address */
fix->mmio_len = 0; /* No MMIO Address */
	fix->accel = FB_ACCEL_NONE;/* FB_ACCEL_MSM needs to be added in fb.h */
var->xoffset = 0, /* Offset from virtual to visible */
var->yoffset = 0, /* resolution */
var->grayscale = 0, /* No graylevels */
var->nonstd = 0, /* standard pixel format */
var->activate = FB_ACTIVATE_VBL, /* activate it at vsync */
#ifdef CONFIG_MACH_MSM8X60_PRESTO // kkcho_temp_presto
var->height = 86, /* height of picture in mm */
var->width = 52, /* width of picture in mm */
#else
var->height = 101, /* height of picture in mm */
var->width = 63, /* width of picture in mm */
#endif
//var->height = -1, /* height of picture in mm */
//var->width = -1, /* width of picture in mm */
var->accel_flags = 0, /* acceleration flags */
var->sync = 0, /* see FB_SYNC_* */
var->rotate = 0, /* angle we rotate counter clockwise */
mfd->op_enable = FALSE;
switch (mfd->fb_imgType) {
case MDP_RGB_565:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 11;
var->blue.length = 5;
var->green.length = 6;
var->red.length = 5;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 2;
break;
case MDP_RGB_888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 0;
var->green.offset = 8;
var->red.offset = 16;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 3;
break;
case MDP_ARGB_8888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 0;
var->green.offset = 8;
var->red.offset = 16;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 24;
var->transp.length = 8;
bpp = 4;
break;
case MDP_RGBA_8888:
fix->type = FB_TYPE_PACKED_PIXELS;
fix->xpanstep = 1;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
var->blue.offset = 8;
var->green.offset = 16;
var->red.offset = 24;
var->blue.length = 8;
var->green.length = 8;
var->red.length = 8;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 8;
bpp = 4;
break;
case MDP_YCRYCB_H2V1:
/* ToDo: need to check TV-Out YUV422i framebuffer format */
/* we might need to create new type define */
fix->type = FB_TYPE_INTERLEAVED_PLANES;
fix->xpanstep = 2;
fix->ypanstep = 1;
var->vmode = FB_VMODE_NONINTERLACED;
/* how about R/G/B offset? */
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 11;
var->blue.length = 5;
var->green.length = 6;
var->red.length = 5;
var->blue.msb_right = 0;
var->green.msb_right = 0;
var->red.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
bpp = 2;
break;
default:
MSM_FB_ERR("msm_fb_init: fb %d unkown image type!\n",
mfd->index);
return ret;
}
fix->type = panel_info->is_3d_panel;
fix->line_length = msm_fb_line_length(mfd->index, panel_info->xres,
bpp);
/* Make sure all buffers can be addressed on a page boundary by an x
* and y offset */
remainder = (fix->line_length * panel_info->yres) & (PAGE_SIZE - 1);
/* PAGE_SIZE is a power of 2 */
if (!remainder)
remainder = PAGE_SIZE;
remainder_mode2 = (fix->line_length *
panel_info->mode2_yres) & (PAGE_SIZE - 1);
if (!remainder_mode2)
remainder_mode2 = PAGE_SIZE;
/*
* calculate smem_len based on max size of two supplied modes.
* Only fb0 has mem. fb1 and fb2 don't have mem.
*/
if (!bf_supported || mfd->index == 0)
fix->smem_len = MAX((msm_fb_line_length(mfd->index,
panel_info->xres,
bpp) *
panel_info->yres + PAGE_SIZE -
remainder) * mfd->fb_page,
(msm_fb_line_length(mfd->index,
panel_info->mode2_xres,
bpp) *
panel_info->mode2_yres + PAGE_SIZE -
remainder_mode2) * mfd->fb_page);
else if (mfd->index == 1 || mfd->index == 2) {
pr_debug("%s:%d no memory is allocated for fb%d!\n",
__func__, __LINE__, mfd->index);
fix->smem_len = 0;
}
mfd->var_xres = panel_info->xres;
mfd->var_yres = panel_info->yres;
mfd->var_frame_rate = panel_info->frame_rate;
var->pixclock = mfd->panel_info.clk_rate;
mfd->var_pixclock = var->pixclock;
var->xres = panel_info->xres;
var->yres = panel_info->yres;
var->xres_virtual = panel_info->xres;
var->yres_virtual = panel_info->yres * mfd->fb_page +
((PAGE_SIZE - remainder)/fix->line_length) * mfd->fb_page;
var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
if (mfd->dest == DISPLAY_LCD) {
if (panel_info->type == MDDI_PANEL && panel_info->mddi.is_type1)
var->reserved[4] = panel_info->lcd.refx100 / (100 * 2);
else
var->reserved[4] = panel_info->lcd.refx100 / 100;
} else {
if (panel_info->type == MIPI_VIDEO_PANEL) {
var->reserved[4] = panel_info->mipi.frame_rate;
} else {
var->reserved[4] = panel_info->clk_rate /
((panel_info->lcdc.h_back_porch +
panel_info->lcdc.h_front_porch +
panel_info->lcdc.h_pulse_width +
panel_info->xres) *
(panel_info->lcdc.v_back_porch +
panel_info->lcdc.v_front_porch +
panel_info->lcdc.v_pulse_width +
panel_info->yres));
}
}
pr_debug("reserved[4] %u\n", var->reserved[4]);
/*
* id field for fb app
*/
id = (int *)&mfd->panel;
#if defined(CONFIG_FB_MSM_MDP22)
snprintf(fix->id, sizeof(fix->id), "msmfb22_%x", (__u32) *id);
#elif defined(CONFIG_FB_MSM_MDP30)
snprintf(fix->id, sizeof(fix->id), "msmfb30_%x", (__u32) *id);
#elif defined(CONFIG_FB_MSM_MDP31)
snprintf(fix->id, sizeof(fix->id), "msmfb31_%x", (__u32) *id);
#elif defined(CONFIG_FB_MSM_MDP40)
snprintf(fix->id, sizeof(fix->id), "msmfb40_%x", (__u32) *id);
#else
error CONFIG_FB_MSM_MDP undefined !
#endif
fbi->fbops = &msm_fb_ops;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->pseudo_palette = msm_fb_pseudo_palette;
mfd->ref_cnt = 0;
mfd->sw_currently_refreshing = FALSE;
mfd->sw_refreshing_enable = TRUE;
mfd->panel_power_on = FALSE;
mfd->pan_waiting = FALSE;
init_completion(&mfd->pan_comp);
init_completion(&mfd->refresher_comp);
sema_init(&mfd->sem, 1);
init_timer(&mfd->msmfb_no_update_notify_timer);
mfd->msmfb_no_update_notify_timer.function =
msmfb_no_update_notify_timer_cb;
mfd->msmfb_no_update_notify_timer.data = (unsigned long)mfd;
init_completion(&mfd->msmfb_update_notify);
init_completion(&mfd->msmfb_no_update_notify);
fbram_offset = PAGE_ALIGN((int)fbram)-(int)fbram;
fbram += fbram_offset;
fbram_phys += fbram_offset;
fbram_size -= fbram_offset;
if (!bf_supported || mfd->index == 0)
if (fbram_size < fix->smem_len) {
pr_err("error: no more framebuffer memory!\n");
return -ENOMEM;
}
fbi->screen_base = fbram;
fbi->fix.smem_start = (unsigned long)fbram_phys;
mfd->map_buffer = msm_subsystem_map_buffer(
fbi->fix.smem_start, fbi->fix.smem_len,
flags, subsys_id, 2);
if (mfd->map_buffer) {
pr_debug("%s(): buf 0x%lx, mfd->map_buffer->iova[0] 0x%lx\n"
"mfd->map_buffer->iova[1] 0x%lx", __func__,
fbi->fix.smem_start, mfd->map_buffer->iova[0],
mfd->map_buffer->iova[1]);
}
if (!bf_supported || mfd->index == 0)
memset(fbi->screen_base, 0x0, fix->smem_len);
mfd->op_enable = TRUE;
mfd->panel_power_on = FALSE;
/* cursor memory allocation */
if (mfd->cursor_update) {
mfd->cursor_buf = dma_alloc_coherent(NULL,
MDP_CURSOR_SIZE,
(dma_addr_t *) &mfd->cursor_buf_phys,
GFP_KERNEL);
if (!mfd->cursor_buf)
mfd->cursor_update = 0;
}
if (mfd->lut_update) {
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret)
printk(KERN_ERR "%s: fb_alloc_cmap() failed!\n",
__func__);
}
if (register_framebuffer(fbi) < 0) {
if (mfd->lut_update)
fb_dealloc_cmap(&fbi->cmap);
if (mfd->cursor_buf)
dma_free_coherent(NULL,
MDP_CURSOR_SIZE,
mfd->cursor_buf,
(dma_addr_t) mfd->cursor_buf_phys);
mfd->op_enable = FALSE;
return -EPERM;
}
if (fbi->node == 0)
wake_lock_init(&mdp_idle_wakelock, WAKE_LOCK_IDLE, "mdp");
fbram += fix->smem_len;
fbram_phys += fix->smem_len;
fbram_size -= fix->smem_len;
MSM_FB_INFO
("FrameBuffer[%d] %dx%d size=%d bytes is registered successfully!\n",
mfd->index, fbi->var.xres, fbi->var.yres, fbi->fix.smem_len);
#ifdef CONFIG_PANTECH_DONOT_POWER_ON_HDMI_AT_FB_OPEN
if (mfd->index == 0)
#endif
{
#if defined(CONFIG_F_SKYDISP_BOOT_LOGO_IN_KERNEL) && defined(CONFIG_FB_MSM_LOGO)
#if defined(CONFIG_SKY_SMB136S_CHARGER) || defined(CONFIG_SKY_SMB137B_CHARGER)
if (sky_charging_status()) {
gpio_set_132_trickle_leakeage(); //jwchoi_temp
ret = load_565rle_image(BATTERY_IMAGE_FILE, bf_supported);
offline_charging_status = 1;
#if defined(CONFIG_MACH_MSM8X60_EF65L)
mfd->bl_level = 3; //jwchoi_temp
#endif
} else
#endif
#ifdef CONFIG_SW_RESET
#ifndef CONFIG_MACH_MSM8X60_PRESTO
if (msm_reset_reason()) {
if (msm_reset_get_bl() == 1)
ret = load_565rle_image(REBOOT_IMAGE_FILE, bf_supported);
else {
#if defined(TARGET_BUILD_USER)
is_blind_reset = 1;
#endif
ret = 1;
}
} else
#else
msm_reset_reason();
#endif
#endif
ret = load_565rle_image(INIT_IMAGE_FILE, bf_supported);
#ifdef CONFIG_SW_RESET
msm_reset_reason_clear();
#endif
if (!ret) {
if (msm_fb_blank_sub(FB_BLANK_UNBLANK, fbi, true)) {
MSM_FB_ERR("[LIVED] msm_fb_register: can't turn on display!\n");
}
}
#else
#ifdef CONFIG_FB_MSM_LOGO
/* Flip buffer */
if (!load_565rle_image(INIT_IMAGE_FILE, bf_supported));
#endif
#endif
ret = 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
if (mfd->panel_info.type != DTV_PANEL) {
mfd->early_suspend.suspend = msmfb_early_suspend;
mfd->early_suspend.resume = msmfb_early_resume;
mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
register_early_suspend(&mfd->early_suspend);
}
#endif
#ifdef MSM_FB_ENABLE_DBGFS
{
struct dentry *root;
struct dentry *sub_dir;
char sub_name[2];
root = msm_fb_get_debugfs_root();
if (root != NULL) {
sub_name[0] = (char)(mfd->index + 0x30);
sub_name[1] = '\0';
sub_dir = debugfs_create_dir(sub_name, root);
} else {
sub_dir = NULL;
}
mfd->sub_dir = sub_dir;
if (sub_dir) {
msm_fb_debugfs_file_create(sub_dir, "op_enable",
(u32 *) &mfd->op_enable);
msm_fb_debugfs_file_create(sub_dir, "panel_power_on",
(u32 *) &mfd->
panel_power_on);
msm_fb_debugfs_file_create(sub_dir, "ref_cnt",
(u32 *) &mfd->ref_cnt);
msm_fb_debugfs_file_create(sub_dir, "fb_imgType",
(u32 *) &mfd->fb_imgType);
msm_fb_debugfs_file_create(sub_dir,
"sw_currently_refreshing",
(u32 *) &mfd->
sw_currently_refreshing);
msm_fb_debugfs_file_create(sub_dir,
"sw_refreshing_enable",
(u32 *) &mfd->
sw_refreshing_enable);
msm_fb_debugfs_file_create(sub_dir, "xres",
(u32 *) &mfd->panel_info.
xres);
msm_fb_debugfs_file_create(sub_dir, "yres",
(u32 *) &mfd->panel_info.
yres);
msm_fb_debugfs_file_create(sub_dir, "bpp",
(u32 *) &mfd->panel_info.
bpp);
msm_fb_debugfs_file_create(sub_dir, "type",
(u32 *) &mfd->panel_info.
type);
msm_fb_debugfs_file_create(sub_dir, "wait_cycle",
(u32 *) &mfd->panel_info.
wait_cycle);
msm_fb_debugfs_file_create(sub_dir, "pdest",
(u32 *) &mfd->panel_info.
pdest);
msm_fb_debugfs_file_create(sub_dir, "backbuff",
(u32 *) &mfd->panel_info.
fb_num);
msm_fb_debugfs_file_create(sub_dir, "clk_rate",
(u32 *) &mfd->panel_info.
clk_rate);
msm_fb_debugfs_file_create(sub_dir, "frame_count",
(u32 *) &mfd->panel_info.
frame_count);
switch (mfd->dest) {
case DISPLAY_LCD:
msm_fb_debugfs_file_create(sub_dir,
"vsync_enable",
(u32 *)&mfd->panel_info.lcd.vsync_enable);
msm_fb_debugfs_file_create(sub_dir,
"refx100",
(u32 *) &mfd->panel_info.lcd. refx100);
msm_fb_debugfs_file_create(sub_dir,
"v_back_porch",
(u32 *) &mfd->panel_info.lcd.v_back_porch);
msm_fb_debugfs_file_create(sub_dir,
"v_front_porch",
(u32 *) &mfd->panel_info.lcd.v_front_porch);
msm_fb_debugfs_file_create(sub_dir,
"v_pulse_width",
(u32 *) &mfd->panel_info.lcd.v_pulse_width);
msm_fb_debugfs_file_create(sub_dir,
"hw_vsync_mode",
(u32 *) &mfd->panel_info.lcd.hw_vsync_mode);
msm_fb_debugfs_file_create(sub_dir,
"vsync_notifier_period", (u32 *)
&mfd->panel_info.lcd.vsync_notifier_period);
break;
case DISPLAY_LCDC:
msm_fb_debugfs_file_create(sub_dir,
"h_back_porch",
(u32 *) &mfd->panel_info.lcdc.h_back_porch);
msm_fb_debugfs_file_create(sub_dir,
"h_front_porch",
(u32 *) &mfd->panel_info.lcdc.h_front_porch);
msm_fb_debugfs_file_create(sub_dir,
"h_pulse_width",
(u32 *) &mfd->panel_info.lcdc.h_pulse_width);
msm_fb_debugfs_file_create(sub_dir,
"v_back_porch",
(u32 *) &mfd->panel_info.lcdc.v_back_porch);
msm_fb_debugfs_file_create(sub_dir,
"v_front_porch",
(u32 *) &mfd->panel_info.lcdc.v_front_porch);
msm_fb_debugfs_file_create(sub_dir,
"v_pulse_width",
(u32 *) &mfd->panel_info.lcdc.v_pulse_width);
msm_fb_debugfs_file_create(sub_dir,
"border_clr",
(u32 *) &mfd->panel_info.lcdc.border_clr);
msm_fb_debugfs_file_create(sub_dir,
"underflow_clr",
(u32 *) &mfd->panel_info.lcdc.underflow_clr);
msm_fb_debugfs_file_create(sub_dir,
"hsync_skew",
(u32 *) &mfd->panel_info.lcdc.hsync_skew);
break;
default:
break;
}
}
}
#endif /* MSM_FB_ENABLE_DBGFS */
return ret;
}
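/*
 * First open of a framebuffer powers up the display path (skipped while the
 * continuous-splash handoff has not completed, and restricted to fb0 when
 * CONFIG_PANTECH_DONOT_POWER_ON_HDMI_AT_FB_OPEN is set); later opens only
 * bump the reference count.
 */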
static int msm_fb_open(struct fb_info *info, int user)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int result;
result = pm_runtime_get_sync(info->dev);
if (result < 0) {
printk(KERN_ERR "pm_runtime: fail to wake up\n");
}
if (info->node == 0 && !(mfd->cont_splash_done)) { /* primary */
mfd->ref_cnt++;
return 0;
}
/* 20110816, kkcho: do not power on HDMI/MHL at boot; turn it ON only for fb0. HDMI uses index '1'. */
#ifdef CONFIG_PANTECH_DONOT_POWER_ON_HDMI_AT_FB_OPEN
if (!mfd->ref_cnt && mfd->index ==0) {
#else
if (!mfd->ref_cnt) {
#endif
if (!bf_supported ||
(info->node != 1 && info->node != 2))
mdp_set_dma_pan_info(info, NULL, TRUE);
else
pr_debug("%s:%d no mdp_set_dma_pan_info %d\n",
__func__, __LINE__, info->node);
if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
printk(KERN_ERR "msm_fb_open: can't turn on display!\n");
return -1;
}
}
mfd->ref_cnt++;
return 0;
}
#if defined(CONFIG_F_SKYDISP_BOOT_LOGO_IN_KERNEL) || defined(CONFIG_SKY_SMB136S_CHARGER) || defined(CONFIG_SKY_SMB137B_CHARGER)
boolean no_release_first = TRUE;
#endif
static int msm_fb_release(struct fb_info *info, int user)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int ret = 0;
if (!mfd->ref_cnt) {
MSM_FB_INFO("msm_fb_release: try to close unopened fb %d!\n",
mfd->index);
return -EINVAL;
}
mfd->ref_cnt--;
#if defined(CONFIG_F_SKYDISP_BOOT_LOGO_IN_KERNEL) || defined(CONFIG_SKY_SMB136S_CHARGER) || defined(CONFIG_SKY_SMB137B_CHARGER)
if (!mfd->ref_cnt && !no_release_first)
#else
if (!mfd->ref_cnt)
#endif
{
#if defined(CONFIG_F_SKYDISP_BOOT_LOGO_IN_KERNEL) || defined(CONFIG_SKY_SMB136S_CHARGER) || defined(CONFIG_SKY_SMB137B_CHARGER)
no_release_first = FALSE;
#endif
if ((ret =
msm_fb_blank_sub(FB_BLANK_POWERDOWN, info,
mfd->op_enable)) != 0) {
printk(KERN_ERR "msm_fb_release: can't turn off display!\n");
return ret;
}
}
pm_runtime_put(info->dev);
return ret;
}
DEFINE_SEMAPHORE(msm_fb_pan_sem);
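/*
 * Pan/flip entry point: validate the requested offsets, decode an optional
 * "UPDT" dirty rectangle from var->reserved[], kick the MDP DMA update under
 * msm_fb_pan_sem, and apply any backlight level that was deferred while the
 * panel was off.
 */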
static int msm_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct mdp_dirty_region dirty;
struct mdp_dirty_region *dirtyPtr = NULL;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msm_fb_panel_data *pdata;
/*
	 * If framebuffer is 1 or 2, I/O pan display is not allowed.
*/
if (bf_supported &&
(info->node == 1 || info->node == 2)) {
pr_err("%s: no pan display for fb%d!",
__func__, info->node);
return -EPERM;
}
if (info->node != 0 || mfd->cont_splash_done) /* primary */
if ((!mfd->op_enable) || (!mfd->panel_power_on))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
return -EINVAL;
if (var->yoffset > (info->var.yres_virtual - info->var.yres))
return -EINVAL;
if (info->fix.xpanstep)
info->var.xoffset =
(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
if (info->fix.ypanstep)
info->var.yoffset =
(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
/* "UPDT" */
if (var->reserved[0] == 0x54445055) {
dirty.xoffset = var->reserved[1] & 0xffff;
dirty.yoffset = (var->reserved[1] >> 16) & 0xffff;
if ((var->reserved[2] & 0xffff) <= dirty.xoffset)
return -EINVAL;
if (((var->reserved[2] >> 16) & 0xffff) <= dirty.yoffset)
return -EINVAL;
dirty.width = (var->reserved[2] & 0xffff) - dirty.xoffset;
dirty.height =
((var->reserved[2] >> 16) & 0xffff) - dirty.yoffset;
info->var.yoffset = var->yoffset;
if (dirty.xoffset < 0)
return -EINVAL;
if (dirty.yoffset < 0)
return -EINVAL;
if ((dirty.xoffset + dirty.width) > info->var.xres)
return -EINVAL;
if ((dirty.yoffset + dirty.height) > info->var.yres)
return -EINVAL;
if ((dirty.width <= 0) || (dirty.height <= 0))
return -EINVAL;
dirtyPtr = &dirty;
}
complete(&mfd->msmfb_update_notify);
mutex_lock(&msm_fb_notify_update_sem);
if (mfd->msmfb_no_update_notify_timer.function)
del_timer(&mfd->msmfb_no_update_notify_timer);
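/* Re-arm the "no update" notification timer to fire HZ jiffies (~1 second) from now. */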
mfd->msmfb_no_update_notify_timer.expires =
jiffies + ((1000 * HZ) / 1000);
add_timer(&mfd->msmfb_no_update_notify_timer);
mutex_unlock(&msm_fb_notify_update_sem);
down(&msm_fb_pan_sem);
if (info->node == 0 && !(mfd->cont_splash_done)) { /* primary */
mdp_set_dma_pan_info(info, NULL, TRUE);
if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
pr_err("%s: can't turn on display!\n", __func__);
return -EINVAL;
}
}
mdp_set_dma_pan_info(info, dirtyPtr,
(var->activate == FB_ACTIVATE_VBL));
mdp_dma_pan_update(info);
up(&msm_fb_pan_sem);
if (unset_bl_level && !bl_updated) {
pdata = (struct msm_fb_panel_data *)mfd->pdev->
dev.platform_data;
if ((pdata) && (pdata->set_backlight)) {
down(&mfd->sem);
mfd->bl_level = unset_bl_level;
pdata->set_backlight(mfd);
bl_level_old = unset_bl_level;
up(&mfd->sem);
bl_updated = 1;
}
}
#ifdef PZ1759_SW1_BACKLIGHT_ON_WORKAROUND
if(!unset_bl_level && !bl_updated)
FrameUpdatedAfterResume =1;
#endif
++mfd->panel_info.frame_count;
return 0;
}
static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (var->rotate != FB_ROTATE_UR)
return -EINVAL;
if (var->grayscale != info->var.grayscale)
return -EINVAL;
switch (var->bits_per_pixel) {
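/* Only the packed layouts this driver supports are accepted: RGB565/BGR565 at 16 bpp, RGB888 (optionally with an 8-bit alpha at offset 24) at 24 bpp, and ARGB/RGBA 8888 at 32 bpp. */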
case 16:
if ((var->green.offset != 5) ||
!((var->blue.offset == 11)
|| (var->blue.offset == 0)) ||
!((var->red.offset == 11)
|| (var->red.offset == 0)) ||
(var->blue.length != 5) ||
(var->green.length != 6) ||
(var->red.length != 5) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0) ||
(var->transp.offset != 0) ||
(var->transp.length != 0))
return -EINVAL;
break;
case 24:
if ((var->blue.offset != 0) ||
(var->green.offset != 8) ||
(var->red.offset != 16) ||
(var->blue.length != 8) ||
(var->green.length != 8) ||
(var->red.length != 8) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0) ||
!(((var->transp.offset == 0) &&
(var->transp.length == 0)) ||
((var->transp.offset == 24) &&
(var->transp.length == 8))))
return -EINVAL;
break;
case 32:
/* Figure out if the user meant RGBA or ARGB
and verify the position of the RGB components */
if (var->transp.offset == 24) {
if ((var->blue.offset != 0) ||
(var->green.offset != 8) ||
(var->red.offset != 16))
return -EINVAL;
} else if (var->transp.offset == 0) {
if ((var->blue.offset != 8) ||
(var->green.offset != 16) ||
(var->red.offset != 24))
return -EINVAL;
} else
return -EINVAL;
/* Check the common values for both RGBA and ARGB */
if ((var->blue.length != 8) ||
(var->green.length != 8) ||
(var->red.length != 8) ||
(var->transp.length != 8) ||
(var->blue.msb_right != 0) ||
(var->green.msb_right != 0) ||
(var->red.msb_right != 0))
return -EINVAL;
break;
default:
return -EINVAL;
}
if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
return -EINVAL;
if (!bf_supported ||
(info->node != 1 && info->node != 2))
if (info->fix.smem_len <
(var->xres_virtual*
var->yres_virtual*
(var->bits_per_pixel/8)))
return -EINVAL;
if ((var->xres == 0) || (var->yres == 0))
return -EINVAL;
if ((var->xres > MAX(mfd->panel_info.xres,
mfd->panel_info.mode2_xres)) ||
(var->yres > MAX(mfd->panel_info.yres,
mfd->panel_info.mode2_yres)))
return -EINVAL;
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
return 0;
}
int msm_fb_check_frame_rate(struct msm_fb_data_type *mfd
, struct fb_info *info)
{
int panel_height, panel_width, var_frame_rate, fps_mod;
struct fb_var_screeninfo *var = &info->var;
fps_mod = 0;
if ((mfd->panel_info.type == DTV_PANEL) ||
(mfd->panel_info.type == HDMI_PANEL)) {
panel_height = var->yres + var->upper_margin +
var->vsync_len + var->lower_margin;
panel_width = var->xres + var->right_margin +
var->hsync_len + var->left_margin;
var_frame_rate = ((var->pixclock)/(panel_height * panel_width));
if (mfd->var_frame_rate != var_frame_rate) {
fps_mod = 1;
mfd->var_frame_rate = var_frame_rate;
}
}
return fps_mod;
}
static int msm_fb_set_par(struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &info->var;
int old_imgType;
int blank = 0;
old_imgType = mfd->fb_imgType;
switch (var->bits_per_pixel) {
case 16:
if (var->red.offset == 0)
mfd->fb_imgType = MDP_BGR_565;
else
mfd->fb_imgType = MDP_RGB_565;
break;
case 24:
if ((var->transp.offset == 0) && (var->transp.length == 0))
mfd->fb_imgType = MDP_RGB_888;
else if ((var->transp.offset == 24) &&
(var->transp.length == 8)) {
mfd->fb_imgType = MDP_ARGB_8888;
info->var.bits_per_pixel = 32;
}
break;
case 32:
if (var->transp.offset == 24)
mfd->fb_imgType = MDP_ARGB_8888;
else
mfd->fb_imgType = MDP_RGBA_8888;
break;
default:
return -EINVAL;
}
if ((mfd->var_pixclock != var->pixclock) ||
(mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
(mfd->var_pixclock != var->pixclock) ||
(mfd->var_xres != var->xres) ||
(mfd->var_yres != var->yres) ||
(msm_fb_check_frame_rate(mfd, info))))) {
mfd->var_xres = var->xres;
mfd->var_yres = var->yres;
mfd->var_pixclock = var->pixclock;
blank = 1;
}
mfd->fbi->fix.line_length = msm_fb_line_length(mfd->index, var->xres,
var->bits_per_pixel/8);
if (blank) {
msm_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
}
return 0;
}
static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd)
{
if (mfd->hw_refresh)
return -EPERM;
if (mfd->sw_currently_refreshing) {
down(&mfd->sem);
mfd->sw_currently_refreshing = FALSE;
up(&mfd->sem);
/* wait until the refresher finishes the last job */
wait_for_completion_killable(&mfd->refresher_comp);
}
return 0;
}
int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd)
{
boolean do_refresh;
if (mfd->hw_refresh)
return -EPERM;
down(&mfd->sem);
if ((!mfd->sw_currently_refreshing) && (mfd->sw_refreshing_enable)) {
do_refresh = TRUE;
mfd->sw_currently_refreshing = TRUE;
} else {
do_refresh = FALSE;
}
up(&mfd->sem);
if (do_refresh)
mdp_refresh_screen((unsigned long)mfd);
return 0;
}
#if defined CONFIG_FB_MSM_MDP31
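/*
 * Split a blit whose destination height trips an MDP 3.1 limitation into
 * two vertically stacked regions and issue them as separate PPP blits.
 */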
static int mdp_blit_split_height(struct fb_info *info,
struct mdp_blit_req *req)
{
int ret;
struct mdp_blit_req splitreq;
int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
splitreq = *req;
/* break dest roi at height */
d_x_0 = d_x_1 = req->dst_rect.x;
d_w_0 = d_w_1 = req->dst_rect.w;
d_y_0 = req->dst_rect.y;
if (req->dst_rect.h % 32 == 3)
d_h_1 = (req->dst_rect.h - 3) / 2 - 1;
else if (req->dst_rect.h % 32 == 2)
d_h_1 = (req->dst_rect.h - 2) / 2 - 6;
else
d_h_1 = (req->dst_rect.h - 1) / 2 - 1;
d_h_0 = req->dst_rect.h - d_h_1;
d_y_1 = d_y_0 + d_h_0;
if (req->dst_rect.h == 3) {
d_h_1 = 2;
d_h_0 = 2;
d_y_1 = d_y_0 + 1;
}
/* blit first region */
if (((splitreq.flags & 0x07) == 0x04) ||
((splitreq.flags & 0x07) == 0x0)) {
if (splitreq.flags & MDP_ROT_90) {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_h_1) / req->dst_rect.h;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_h_1 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
} else {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_h_1) / req->dst_rect.h;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_h_1 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
} else {
if (splitreq.flags & MDP_ROT_90) {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_h_0) / req->dst_rect.h;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_h_0 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
} else {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_h_0) / req->dst_rect.h;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_h_0 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
}
ret = mdp_ppp_blit(info, &splitreq);
if (ret)
return ret;
/* blit second region */
if (((splitreq.flags & 0x07) == 0x04) ||
((splitreq.flags & 0x07) == 0x0)) {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
} else {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
}
ret = mdp_ppp_blit(info, &splitreq);
return ret;
}
#endif
int mdp_blit(struct fb_info *info, struct mdp_blit_req *req)
{
int ret;
#if defined CONFIG_FB_MSM_MDP31 || defined CONFIG_FB_MSM_MDP30
unsigned int remainder = 0, is_bpp_4 = 0;
struct mdp_blit_req splitreq;
int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
if (req->flags & MDP_ROT_90) {
if (((req->dst_rect.h == 1) && ((req->src_rect.w != 1) ||
(req->dst_rect.w != req->src_rect.h))) ||
((req->dst_rect.w == 1) && ((req->src_rect.h != 1) ||
(req->dst_rect.h != req->src_rect.w)))) {
printk(KERN_ERR "mpd_ppp: error scaling when size is 1!\n");
return -EINVAL;
}
} else {
if (((req->dst_rect.w == 1) && ((req->src_rect.w != 1) ||
(req->dst_rect.h != req->src_rect.h))) ||
((req->dst_rect.h == 1) && ((req->src_rect.h != 1) ||
(req->dst_rect.w != req->src_rect.w)))) {
printk(KERN_ERR "mpd_ppp: error scaling when size is 1!\n");
return -EINVAL;
}
}
#endif
if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
return -EINVAL;
}
if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
return 0;
#if defined CONFIG_FB_MSM_MDP31
/* MDP 3.1 width split workaround: certain destination widths (based on width mod 32 and bpp) trip a hardware limitation, so break the destination ROI into two side-by-side blits. */
remainder = (req->dst_rect.w)%32;
ret = mdp_get_bytes_per_pixel(req->dst.format,
(struct msm_fb_data_type *)info->par);
if (ret <= 0) {
printk(KERN_ERR "mdp_ppp: incorrect bpp!\n");
return -EINVAL;
}
is_bpp_4 = (ret == 4) ? 1 : 0;
if ((is_bpp_4 && (remainder == 6 || remainder == 14 ||
remainder == 22 || remainder == 30)) || remainder == 3 ||
(remainder == 1 && req->dst_rect.w != 1) ||
(remainder == 2 && req->dst_rect.w != 2)) {
/* make a new request as provided by the user */
splitreq = *req;
/* break dest roi at width */
d_y_0 = d_y_1 = req->dst_rect.y;
d_h_0 = d_h_1 = req->dst_rect.h;
d_x_0 = req->dst_rect.x;
if (remainder == 14)
d_w_1 = (req->dst_rect.w - 14) / 2 + 4;
else if (remainder == 22)
d_w_1 = (req->dst_rect.w - 22) / 2 + 10;
else if (remainder == 30)
d_w_1 = (req->dst_rect.w - 30) / 2 + 10;
else if (remainder == 6)
d_w_1 = req->dst_rect.w / 2 - 1;
else if (remainder == 3)
d_w_1 = (req->dst_rect.w - 3) / 2 - 1;
else if (remainder == 2)
d_w_1 = (req->dst_rect.w - 2) / 2 - 6;
else
d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
d_w_0 = req->dst_rect.w - d_w_1;
d_x_1 = d_x_0 + d_w_0;
if (req->dst_rect.w == 3) {
d_w_1 = 2;
d_w_0 = 2;
d_x_1 = d_x_0 + 1;
}
/* blit first region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x0)) {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_1) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_1 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_1) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_1 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
} else {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_0) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_0 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_0) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_0 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
}
if ((splitreq.dst_rect.h % 32 == 3) ||
((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
ret = mdp_blit_split_height(info, &splitreq);
else
ret = mdp_ppp_blit(info, &splitreq);
if (ret)
return ret;
/* blit second region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x0)) {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
} else {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
}
if (((splitreq.dst_rect.h % 32) == 3) ||
((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
ret = mdp_blit_split_height(info, &splitreq);
else
ret = mdp_ppp_blit(info, &splitreq);
if (ret)
return ret;
} else if ((req->dst_rect.h % 32) == 3 ||
((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
ret = mdp_blit_split_height(info, req);
else
ret = mdp_ppp_blit(info, req);
return ret;
#elif defined CONFIG_FB_MSM_MDP30
/* MDP 3.0 width split workaround: 4-bpp destinations whose width mod 16 is 6 or 14 are broken into two side-by-side blits. */
remainder = (req->dst_rect.w)%16;
ret = mdp_get_bytes_per_pixel(req->dst.format,
(struct msm_fb_data_type *)info->par);
if (ret <= 0) {
printk(KERN_ERR "mdp_ppp: incorrect bpp!\n");
return -EINVAL;
}
is_bpp_4 = (ret == 4) ? 1 : 0;
if ((is_bpp_4 && (remainder == 6 || remainder == 14))) {
/* make a new request as provided by the user */
splitreq = *req;
/* break dest roi at width */
d_y_0 = d_y_1 = req->dst_rect.y;
d_h_0 = d_h_1 = req->dst_rect.h;
d_x_0 = req->dst_rect.x;
if (remainder == 14 || remainder == 6)
d_w_1 = req->dst_rect.w / 2;
else
d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
d_w_0 = req->dst_rect.w - d_w_1;
d_x_1 = d_x_0 + d_w_0;
/* blit first region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x05) ||
((splitreq.flags & 0x07) == 0x02) ||
((splitreq.flags & 0x07) == 0x0)) {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_1) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_1 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_1) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_1 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
} else {
if (splitreq.flags & MDP_ROT_90) {
s_x_0 = s_x_1 = req->src_rect.x;
s_w_0 = s_w_1 = req->src_rect.w;
s_y_0 = req->src_rect.y;
s_h_1 = (req->src_rect.h * d_w_0) /
req->dst_rect.w;
s_h_0 = req->src_rect.h - s_h_1;
s_y_1 = s_y_0 + s_h_0;
if (d_w_0 >= 8 * s_h_1) {
s_h_1++;
s_y_1--;
}
} else {
s_y_0 = s_y_1 = req->src_rect.y;
s_h_0 = s_h_1 = req->src_rect.h;
s_x_0 = req->src_rect.x;
s_w_1 = (req->src_rect.w * d_w_0) /
req->dst_rect.w;
s_w_0 = req->src_rect.w - s_w_1;
s_x_1 = s_x_0 + s_w_0;
if (d_w_0 >= 8 * s_w_1) {
s_w_1++;
s_x_1--;
}
}
splitreq.src_rect.h = s_h_0;
splitreq.src_rect.y = s_y_0;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_0;
splitreq.src_rect.w = s_w_0;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
}
/* No need to split in height */
ret = mdp_ppp_blit(info, &splitreq);
if (ret)
return ret;
/* blit second region */
if (((splitreq.flags & 0x07) == 0x07) ||
((splitreq.flags & 0x07) == 0x05) ||
((splitreq.flags & 0x07) == 0x02) ||
((splitreq.flags & 0x07) == 0x0)) {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_1;
splitreq.dst_rect.y = d_y_1;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_1;
splitreq.dst_rect.w = d_w_1;
} else {
splitreq.src_rect.h = s_h_1;
splitreq.src_rect.y = s_y_1;
splitreq.dst_rect.h = d_h_0;
splitreq.dst_rect.y = d_y_0;
splitreq.src_rect.x = s_x_1;
splitreq.src_rect.w = s_w_1;
splitreq.dst_rect.x = d_x_0;
splitreq.dst_rect.w = d_w_0;
}
/* No need to split in height ... just width */
ret = mdp_ppp_blit(info, &splitreq);
if (ret)
return ret;
} else
ret = mdp_ppp_blit(info, req);
return ret;
#else
ret = mdp_ppp_blit(info, req);
return ret;
#endif
}
typedef void (*msm_dma_barrier_function_pointer) (void *, size_t);
static inline void msm_fb_dma_barrier_for_rect(struct fb_info *info,
struct mdp_img *img, struct mdp_rect *rect,
msm_dma_barrier_function_pointer dma_barrier_fp
)
{
/*
* Compute the start and end addresses of the rectangles.
* NOTE: As currently implemented, the data between
* the end of one row and the start of the next is
* included in the address range rather than
* doing multiple calls for each row.
*/
unsigned long start;
size_t size;
char * const pmem_start = info->screen_base;
int bytes_per_pixel = mdp_get_bytes_per_pixel(img->format,
(struct msm_fb_data_type *)info->par);
if (bytes_per_pixel <= 0) {
printk(KERN_ERR "%s incorrect bpp!\n", __func__);
return;
}
start = (unsigned long)pmem_start + img->offset +
(img->width * rect->y + rect->x) * bytes_per_pixel;
size = (rect->h * img->width + rect->w) * bytes_per_pixel;
(*dma_barrier_fp) ((void *) start, size);
}
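/*
 * Cache/barrier helpers, selected according to how the framebuffer pages are
 * mapped: a plain dmb() is enough for noncached and write-combined mappings,
 * while write-through/write-back mappings need explicit DMA cache pre/post
 * operations on the affected address range.
 */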
static inline void msm_dma_nc_pre(void)
{
dmb();
}
static inline void msm_dma_wt_pre(void)
{
dmb();
}
static inline void msm_dma_todevice_wb_pre(void *start, size_t size)
{
dma_cache_pre_ops(start, size, DMA_TO_DEVICE);
}
static inline void msm_dma_fromdevice_wb_pre(void *start, size_t size)
{
dma_cache_pre_ops(start, size, DMA_FROM_DEVICE);
}
static inline void msm_dma_nc_post(void)
{
dmb();
}
static inline void msm_dma_fromdevice_wt_post(void *start, size_t size)
{
dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
}
static inline void msm_dma_todevice_wb_post(void *start, size_t size)
{
dma_cache_post_ops(start, size, DMA_TO_DEVICE);
}
static inline void msm_dma_fromdevice_wb_post(void *start, size_t size)
{
dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
}
/*
* Do the write barriers required to guarantee data is committed to RAM
* (from CPU cache or internal buffers) before a DMA operation starts.
* NOTE: As currently implemented, the data between
* the end of one row and the start of the next is
* included in the address range rather than
* doing multiple calls for each row.
*/
static void msm_fb_ensure_memory_coherency_before_dma(struct fb_info *info,
struct mdp_blit_req *req_list,
int req_list_count)
{
#ifdef CONFIG_ARCH_QSD8X50
int i;
/*
* Normally, do the requested barriers for each address
* range that corresponds to a rectangle.
*
* But if at least one write barrier is requested for data
* going to or from the device but no address range is
* needed for that barrier, then do the barrier, but do it
* only once, no matter how many requests there are.
*/
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
switch (mfd->mdp_fb_page_protection) {
default:
case MDP_FB_PAGE_PROTECTION_NONCACHED:
case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
/*
* The following barrier is only done at most once,
* since further calls would be redundant.
*/
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags
& MDP_NO_DMA_BARRIER_START)) {
msm_dma_nc_pre();
break;
}
}
break;
case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
/*
* The following barrier is only done at most once,
* since further calls would be redundant.
*/
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags
& MDP_NO_DMA_BARRIER_START)) {
msm_dma_wt_pre();
break;
}
}
break;
case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags &
MDP_NO_DMA_BARRIER_START)) {
msm_fb_dma_barrier_for_rect(info,
&(req_list[i].src),
&(req_list[i].src_rect),
msm_dma_todevice_wb_pre
);
msm_fb_dma_barrier_for_rect(info,
&(req_list[i].dst),
&(req_list[i].dst_rect),
msm_dma_todevice_wb_pre
);
}
}
break;
}
#else
dmb();
#endif
}
/*
* Do the write barriers required to guarantee data will be re-read from RAM by
* the CPU after a DMA operation ends.
* NOTE: As currently implemented, the data between
* the end of one row and the start of the next is
* included in the address range rather than
* doing multiple calls for each row.
*/
static void msm_fb_ensure_memory_coherency_after_dma(struct fb_info *info,
struct mdp_blit_req *req_list,
int req_list_count)
{
#ifdef CONFIG_ARCH_QSD8X50
int i;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
switch (mfd->mdp_fb_page_protection) {
default:
case MDP_FB_PAGE_PROTECTION_NONCACHED:
case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
/*
* The following barrier is only done at most once,
* since further calls would be redundant.
*/
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags
& MDP_NO_DMA_BARRIER_END)) {
msm_dma_nc_post();
break;
}
}
break;
case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags &
MDP_NO_DMA_BARRIER_END)) {
msm_fb_dma_barrier_for_rect(info,
&(req_list[i].dst),
&(req_list[i].dst_rect),
msm_dma_fromdevice_wt_post
);
}
}
break;
case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags &
MDP_NO_DMA_BARRIER_END)) {
msm_fb_dma_barrier_for_rect(info,
&(req_list[i].dst),
&(req_list[i].dst_rect),
msm_dma_fromdevice_wb_post
);
}
}
break;
}
#else
dmb();
#endif
}
/*
 * NOTE: The userspace issues blit operations in a sequence; the sequence
 * starts with an operation marked START and ends with an operation marked
 * END. It is guaranteed by the userspace that all the blit operations
 * between START and END stay within the regions designated
 * by the START and END operations and that the userspace doesn't modify
 * those areas. Hence it is enough to perform barrier/cache operations
 * only on the START and END operations.
*/
static int msmfb_blit(struct fb_info *info, void __user *p)
{
/*
* CAUTION: The names of the struct types intentionally *DON'T* match
* the names of the variables declared -- they appear to be swapped.
* Read the code carefully and you should see that the variable names
* make sense.
*/
const int MAX_LIST_WINDOW = 16;
struct mdp_blit_req req_list[MAX_LIST_WINDOW];
struct mdp_blit_req_list req_list_header;
int count, i, req_list_count;
if (bf_supported &&
(info->node == 1 || info->node == 2)) {
pr_err("%s: no pan display for fb%d.",
__func__, info->node);
return -EPERM;
}
/* Get the count size for the total BLIT request. */
if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
return -EFAULT;
p += sizeof(req_list_header);
count = req_list_header.count;
if (count < 0 || count >= MAX_BLIT_REQ)
return -EINVAL;
while (count > 0) {
/*
* Access the requests through a narrow window to decrease copy
* overhead and make larger requests accessible to the
* coherency management code.
* NOTE: The window size is intended to be larger than the
* typical request size, but not require more than 2
* kbytes of stack storage.
*/
req_list_count = count;
if (req_list_count > MAX_LIST_WINDOW)
req_list_count = MAX_LIST_WINDOW;
if (copy_from_user(&req_list, p,
sizeof(struct mdp_blit_req)*req_list_count))
return -EFAULT;
/*
* Ensure that any data CPU may have previously written to
* internal state (but not yet committed to memory) is
* guaranteed to be committed to memory now.
*/
msm_fb_ensure_memory_coherency_before_dma(info,
req_list, req_list_count);
/*
* Do the blit DMA, if required -- returning early only if
* there is a failure.
*/
for (i = 0; i < req_list_count; i++) {
if (!(req_list[i].flags & MDP_NO_BLIT)) {
/* Do the actual blit. */
int ret = mdp_blit(info, &(req_list[i]));
/*
* Note that early returns don't guarantee
* memory coherency.
*/
if (ret)
return ret;
}
}
/*
* Ensure that CPU cache and other internal CPU state is
* updated to reflect any change in memory modified by MDP blit
* DMA.
*/
msm_fb_ensure_memory_coherency_after_dma(info,
req_list,
req_list_count);
/* Go to next window of requests. */
count -= req_list_count;
p += sizeof(struct mdp_blit_req)*req_list_count;
}
return 0;
}
#ifdef CONFIG_FB_MSM_OVERLAY
static int msmfb_overlay_get(struct fb_info *info, void __user *p)
{
struct mdp_overlay req;
int ret;
if (copy_from_user(&req, p, sizeof(req)))
return -EFAULT;
ret = mdp4_overlay_get(info, &req);
if (ret) {
printk(KERN_ERR "%s: ioctl failed \n",
__func__);
return ret;
}
if (copy_to_user(p, &req, sizeof(req))) {
printk(KERN_ERR "%s: copy2user failed \n",
__func__);
return -EFAULT;
}
return 0;
}
static int msmfb_overlay_set(struct fb_info *info, void __user *p)
{
struct mdp_overlay req;
int ret;
if (copy_from_user(&req, p, sizeof(req)))
return -EFAULT;
ret = mdp4_overlay_set(info, &req);
if (ret) {
printk(KERN_ERR "%s: ioctl failed, rc=%d\n",
__func__, ret);
return ret;
}
if (copy_to_user(p, &req, sizeof(req))) {
printk(KERN_ERR "%s: copy2user failed \n",
__func__);
return -EFAULT;
}
return 0;
}
static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
{
int ret, ndx;
ret = copy_from_user(&ndx, argp, sizeof(ndx));
if (ret) {
printk(KERN_ERR "%s:msmfb_overlay_unset ioctl failed \n",
__func__);
return ret;
}
return mdp4_overlay_unset(info, ndx);
}
static int msmfb_overlay_play_wait(struct fb_info *info, unsigned long *argp)
{
int ret;
struct msmfb_overlay_data req;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (mfd->overlay_play_enable == 0) /* nothing to do */
return 0;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
pr_err("%s:msmfb_overlay_wait ioctl failed", __func__);
return ret;
}
ret = mdp4_overlay_play_wait(info, &req);
return ret;
}
static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
{
int ret;
struct msmfb_overlay_data req;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msm_fb_panel_data *pdata;
if (mfd->overlay_play_enable == 0) /* nothing to do */
return 0;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
printk(KERN_ERR "%s:msmfb_overlay_play ioctl failed \n",
__func__);
return ret;
}
complete(&mfd->msmfb_update_notify);
mutex_lock(&msm_fb_notify_update_sem);
if (mfd->msmfb_no_update_notify_timer.function)
del_timer(&mfd->msmfb_no_update_notify_timer);
mfd->msmfb_no_update_notify_timer.expires =
jiffies + ((1000 * HZ) / 1000);
add_timer(&mfd->msmfb_no_update_notify_timer);
mutex_unlock(&msm_fb_notify_update_sem);
if (info->node == 0 && !(mfd->cont_splash_done)) { /* primary */
mdp_set_dma_pan_info(info, NULL, TRUE);
if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
pr_err("%s: can't turn on display!\n", __func__);
return -EINVAL;
}
}
ret = mdp4_overlay_play(info, &req);
if (unset_bl_level && !bl_updated) {
pdata = (struct msm_fb_panel_data *)mfd->pdev->
dev.platform_data;
if ((pdata) && (pdata->set_backlight)) {
down(&mfd->sem);
mfd->bl_level = unset_bl_level;
pdata->set_backlight(mfd);
bl_level_old = unset_bl_level;
up(&mfd->sem);
bl_updated = 1;
}
}
return ret;
}
static int msmfb_overlay_play_enable(struct fb_info *info, unsigned long *argp)
{
int ret, enable;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
ret = copy_from_user(&enable, argp, sizeof(enable));
if (ret) {
printk(KERN_ERR "%s:msmfb_overlay_play_enable ioctl failed \n",
__func__);
return ret;
}
mfd->overlay_play_enable = enable;
return 0;
}
static int msmfb_overlay_blt(struct fb_info *info, unsigned long *argp)
{
int ret;
struct msmfb_overlay_blt req;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
pr_err("%s: failed\n", __func__);
return ret;
}
ret = mdp4_overlay_blt(info, &req);
return ret;
}
static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
{
int ret;
struct msmfb_overlay_blt req;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
pr_err("%s: failed\n", __func__);
return ret;
}
ret = mdp4_overlay_blt_offset(info, &req);
ret = copy_to_user(argp, &req, sizeof(req));
if (ret)
printk(KERN_ERR "%s:msmfb_overlay_blt_off ioctl failed\n",
__func__);
return ret;
}
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
return mdp4_writeback_init(info);
}
static int msmfb_overlay_ioctl_writeback_start(
struct fb_info *info)
{
int ret = 0;
ret = mdp4_writeback_start(info);
if (ret)
goto error;
error:
if (ret)
pr_err("%s:msmfb_writeback_start "
" ioctl failed\n", __func__);
return ret;
}
static int msmfb_overlay_ioctl_writeback_stop(
struct fb_info *info)
{
int ret = 0;
ret = mdp4_writeback_stop(info);
if (ret)
goto error;
error:
if (ret)
pr_err("%s:msmfb_writeback_stop ioctl failed\n",
__func__);
return ret;
}
static int msmfb_overlay_ioctl_writeback_queue_buffer(
struct fb_info *info, unsigned long *argp)
{
int ret = 0;
struct msmfb_data data;
ret = copy_from_user(&data, argp, sizeof(data));
if (ret)
goto error;
ret = mdp4_writeback_queue_buffer(info, &data);
if (ret)
goto error;
error:
if (ret)
pr_err("%s:msmfb_writeback_queue_buffer ioctl failed\n",
__func__);
return ret;
}
static int msmfb_overlay_ioctl_writeback_dequeue_buffer(
struct fb_info *info, unsigned long *argp)
{
int ret = 0;
struct msmfb_data data;
ret = copy_from_user(&data, argp, sizeof(data));
if (ret)
goto error;
ret = mdp4_writeback_dequeue_buffer(info, &data);
if (ret)
goto error;
ret = copy_to_user(argp, &data, sizeof(data));
if (ret)
goto error;
error:
if (ret)
pr_err("%s:msmfb_writeback_dequeue_buffer ioctl failed\n",
__func__);
return ret;
}
static int msmfb_overlay_ioctl_writeback_terminate(struct fb_info *info)
{
return mdp4_writeback_terminate(info);
}
#else
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
return -ENOTSUPP;
}
static int msmfb_overlay_ioctl_writeback_start(
struct fb_info *info)
{
return -ENOTSUPP;
}
static int msmfb_overlay_ioctl_writeback_stop(
struct fb_info *info)
{
return -ENOTSUPP;
}
static int msmfb_overlay_ioctl_writeback_queue_buffer(
struct fb_info *info, unsigned long *argp)
{
return -ENOTSUPP;
}
static int msmfb_overlay_ioctl_writeback_dequeue_buffer(
struct fb_info *info, unsigned long *argp)
{
return -ENOTSUPP;
}
static int msmfb_overlay_ioctl_writeback_terminate(struct fb_info *info)
{
return -ENOTSUPP;
}
#endif
static int msmfb_overlay_3d_sbys(struct fb_info *info, unsigned long *argp)
{
int ret;
struct msmfb_overlay_3d req;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
pr_err("%s:msmfb_overlay_3d_ctrl ioctl failed\n",
__func__);
return ret;
}
ret = mdp4_overlay_3d_sbys(info, &req);
return ret;
}
static int msmfb_mixer_info(struct fb_info *info, unsigned long *argp)
{
int ret, cnt;
struct msmfb_mixer_info_req req;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
pr_err("%s: failed\n", __func__);
return ret;
}
cnt = mdp4_mixer_info(req.mixer_num, req.info);
req.cnt = cnt;
ret = copy_to_user(argp, &req, sizeof(req));
if (ret)
pr_err("%s:msmfb_overlay_blt_off ioctl failed\n",
__func__);
return cnt;
}
#endif
DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
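/* msm_fb_ioctl_ppp_sem serializes the blit/overlay ioctls below; msm_fb_ioctl_lut_sem guards LUT updates. */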
/* Set color conversion matrix from user space */
#ifndef CONFIG_FB_MSM_MDP40
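/* Pre-MDP4 targets program the CSC coefficient and bias (bv) registers directly; MDP4 delegates to mdp4_vg_csc_update(). */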
static void msmfb_set_color_conv(struct mdp_ccs *p)
{
int i;
if (p->direction == MDP_CCS_RGB2YUV) {
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* RGB->YUV primary forward matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(p->ccs[i], MDP_CSC_PFMVn(i));
#ifdef CONFIG_FB_MSM_MDP31
for (i = 0; i < MDP_BV_SIZE; i++)
writel(p->bv[i], MDP_CSC_POST_BV2n(i));
#endif
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
} else {
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* YUV->RGB primary reverse matrix */
for (i = 0; i < MDP_CCS_SIZE; i++)
writel(p->ccs[i], MDP_CSC_PRMVn(i));
for (i = 0; i < MDP_BV_SIZE; i++)
writel(p->bv[i], MDP_CSC_PRE_BV1n(i));
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
}
#else
static void msmfb_set_color_conv(struct mdp_csc *p)
{
mdp4_vg_csc_update(p);
}
#endif
static int msmfb_notify_update(struct fb_info *info, unsigned long *argp)
{
int ret, notify;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
ret = copy_from_user(&notify, argp, sizeof(int));
if (ret) {
pr_err("%s:ioctl failed\n", __func__);
return ret;
}
if (notify > NOTIFY_UPDATE_STOP)
return -EINVAL;
if (notify == NOTIFY_UPDATE_START) {
INIT_COMPLETION(mfd->msmfb_update_notify);
wait_for_completion_interruptible(&mfd->msmfb_update_notify);
} else {
INIT_COMPLETION(mfd->msmfb_no_update_notify);
wait_for_completion_interruptible(&mfd->msmfb_no_update_notify);
}
return 0;
}
static int msmfb_handle_pp_ioctl(struct msmfb_mdp_pp *pp_ptr)
{
int ret = -1;
if (!pp_ptr)
return ret;
switch (pp_ptr->op) {
#ifdef CONFIG_FB_MSM_MDP40
case mdp_op_csc_cfg:
ret = mdp4_csc_config(&(pp_ptr->data.csc_cfg_data));
break;
case mdp_op_pcc_cfg:
ret = mdp4_pcc_cfg(&(pp_ptr->data.pcc_cfg_data));
break;
case mdp_op_lut_cfg:
switch (pp_ptr->data.lut_cfg_data.lut_type) {
case mdp_lut_igc:
ret = mdp4_igc_lut_config(
(struct mdp_igc_lut_data *)
&pp_ptr->data.lut_cfg_data.data);
break;
case mdp_lut_pgc:
ret = mdp4_argc_cfg(
&pp_ptr->data.lut_cfg_data.data.pgc_lut_data);
break;
case mdp_lut_hist:
ret = mdp_hist_lut_config(
(struct mdp_hist_lut_data *)
&pp_ptr->data.lut_cfg_data.data);
break;
default:
break;
}
break;
case mdp_op_qseed_cfg:
ret = mdp4_qseed_cfg((struct mdp_qseed_cfg_data *)
&pp_ptr->data.qseed_cfg_data);
break;
#endif
default:
pr_warn("Unsupported request to MDP_PP IOCTL.\n");
ret = -EINVAL;
break;
}
return ret;
}
static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
void __user *argp = (void __user *)arg;
struct fb_cursor cursor;
struct fb_cmap cmap;
struct mdp_histogram_data hist;
struct mdp_histogram_start_req hist_req;
uint32_t block;
#ifndef CONFIG_FB_MSM_MDP40
struct mdp_ccs ccs_matrix;
#else
struct mdp_csc csc_matrix;
#endif
struct mdp_page_protection fb_page_protection;
struct msmfb_mdp_pp mdp_pp;
int ret = 0;
#if defined(CONFIG_F_SKYDISP_LCD_FORCE_ONOFF)
boolean enable;
#endif
switch (cmd) {
#ifdef CONFIG_FB_MSM_OVERLAY
case MSMFB_OVERLAY_GET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_get(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_SET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_set(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_UNSET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_unset(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_PLAY:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_play(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_PLAY_ENABLE:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_play_enable(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_PLAY_WAIT:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_play_wait(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_BLT:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_blt(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_BLT_OFFSET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_blt_off(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_OVERLAY_3D:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_3d_sbys(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_MIXER_INFO:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_mixer_info(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
case MSMFB_WRITEBACK_INIT:
ret = msmfb_overlay_ioctl_writeback_init(info);
break;
case MSMFB_WRITEBACK_START:
ret = msmfb_overlay_ioctl_writeback_start(
info);
break;
case MSMFB_WRITEBACK_STOP:
ret = msmfb_overlay_ioctl_writeback_stop(
info);
break;
case MSMFB_WRITEBACK_QUEUE_BUFFER:
ret = msmfb_overlay_ioctl_writeback_queue_buffer(
info, argp);
break;
case MSMFB_WRITEBACK_DEQUEUE_BUFFER:
ret = msmfb_overlay_ioctl_writeback_dequeue_buffer(
info, argp);
break;
case MSMFB_WRITEBACK_TERMINATE:
ret = msmfb_overlay_ioctl_writeback_terminate(info);
break;
#endif
case MSMFB_BLIT:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_blit(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
/* Ioctl for setting ccs matrix from user space */
case MSMFB_SET_CCS_MATRIX:
#ifndef CONFIG_FB_MSM_MDP40
ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix));
if (ret) {
printk(KERN_ERR
"%s:MSMFB_SET_CCS_MATRIX ioctl failed \n",
__func__);
return ret;
}
down(&msm_fb_ioctl_ppp_sem);
if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
mdp_ccs_rgb2yuv = ccs_matrix;
else
mdp_ccs_yuv2rgb = ccs_matrix;
msmfb_set_color_conv(&ccs_matrix) ;
up(&msm_fb_ioctl_ppp_sem);
#else
ret = copy_from_user(&csc_matrix, argp, sizeof(csc_matrix));
if (ret) {
pr_err("%s:MSMFB_SET_CSC_MATRIX ioctl failed\n",
__func__);
return ret;
}
down(&msm_fb_ioctl_ppp_sem);
msmfb_set_color_conv(&csc_matrix);
up(&msm_fb_ioctl_ppp_sem);
#endif
break;
/* Ioctl for getting ccs matrix to user space */
case MSMFB_GET_CCS_MATRIX:
#ifndef CONFIG_FB_MSM_MDP40
ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix)) ;
if (ret) {
printk(KERN_ERR
"%s:MSMFB_GET_CCS_MATRIX ioctl failed \n",
__func__);
return ret;
}
down(&msm_fb_ioctl_ppp_sem);
if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
ccs_matrix = mdp_ccs_rgb2yuv;
else
ccs_matrix = mdp_ccs_yuv2rgb;
ret = copy_to_user(argp, &ccs_matrix, sizeof(ccs_matrix));
if (ret) {
printk(KERN_ERR
"%s:MSMFB_GET_CCS_MATRIX ioctl failed \n",
__func__);
return ret ;
}
up(&msm_fb_ioctl_ppp_sem);
#else
ret = -EINVAL;
#endif
break;
case MSMFB_GRP_DISP:
#ifdef CONFIG_FB_MSM_MDP22
{
unsigned long grp_id;
ret = copy_from_user(&grp_id, argp, sizeof(grp_id));
if (ret)
return ret;
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
writel(grp_id, MDP_FULL_BYPASS_WORD43);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
FALSE);
break;
}
#else
return -EFAULT;
#endif
case MSMFB_SUSPEND_SW_REFRESHER:
if (!mfd->panel_power_on)
return -EPERM;
mfd->sw_refreshing_enable = FALSE;
ret = msm_fb_stop_sw_refresher(mfd);
break;
case MSMFB_RESUME_SW_REFRESHER:
if (!mfd->panel_power_on)
return -EPERM;
mfd->sw_refreshing_enable = TRUE;
ret = msm_fb_resume_sw_refresher(mfd);
break;
case MSMFB_CURSOR:
ret = copy_from_user(&cursor, argp, sizeof(cursor));
if (ret)
return ret;
ret = msm_fb_cursor(info, &cursor);
break;
case MSMFB_SET_LUT:
ret = copy_from_user(&cmap, argp, sizeof(cmap));
if (ret)
return ret;
mutex_lock(&msm_fb_ioctl_lut_sem);
ret = msm_fb_set_lut(&cmap, info);
mutex_unlock(&msm_fb_ioctl_lut_sem);
break;
case MSMFB_HISTOGRAM:
if (!mfd->panel_power_on)
return -EPERM;
if (!mfd->do_histogram)
return -ENODEV;
ret = copy_from_user(&hist, argp, sizeof(hist));
if (ret)
return ret;
ret = mfd->do_histogram(info, &hist);
break;
case MSMFB_HISTOGRAM_START:
if (!mfd->panel_power_on)
return -EPERM;
if (!mfd->do_histogram)
return -ENODEV;
ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
if (ret)
return ret;
ret = mdp_histogram_start(&hist_req);
break;
case MSMFB_HISTOGRAM_STOP:
if (!mfd->do_histogram)
return -ENODEV;
ret = copy_from_user(&block, argp, sizeof(int));
if (ret)
return ret;
ret = mdp_histogram_stop(info, block);
break;
case MSMFB_GET_PAGE_PROTECTION:
fb_page_protection.page_protection
= mfd->mdp_fb_page_protection;
ret = copy_to_user(argp, &fb_page_protection,
sizeof(fb_page_protection));
if (ret)
return ret;
break;
case MSMFB_NOTIFY_UPDATE:
ret = msmfb_notify_update(info, argp);
break;
case MSMFB_SET_PAGE_PROTECTION:
#if defined CONFIG_ARCH_QSD8X50 || defined CONFIG_ARCH_MSM8X60
ret = copy_from_user(&fb_page_protection, argp,
sizeof(fb_page_protection));
if (ret)
return ret;
/* Validate the proposed page protection settings. */
switch (fb_page_protection.page_protection) {
case MDP_FB_PAGE_PROTECTION_NONCACHED:
case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
/* Write-back cache (read allocate) */
case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
/* Write-back cache (write allocate) */
case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
mfd->mdp_fb_page_protection =
fb_page_protection.page_protection;
break;
default:
ret = -EINVAL;
break;
}
#else
/*
* Don't allow caching until 7k DMA cache operations are
* available.
*/
ret = -EINVAL;
#endif
break;
case MSMFB_MDP_PP:
ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
if (ret)
return ret;
ret = msmfb_handle_pp_ioctl(&mdp_pp);
break;
#ifdef CONFIG_F_SKYDISP_LCD_RESET
case MSMFB_SKY_LCD_RESET_INIT:
MSM_FB_ERR("[LIVED] LCD reset initialization\n");
down(&msm_fb_ioctl_ppp_sem);
msm_fb_blank_sub_force(0, info, 1);
// Need Real GPIO LCD reset? do not need right now...
msm_fb_blank_sub_force(1, info, 1);
up(&msm_fb_ioctl_ppp_sem);
break;
#endif
#ifdef CONFIG_F_SKYDISP_LCD_FORCE_ONOFF
case MSMFB_SKY_LCD_FORCE_ONOFF:
ret = copy_from_user(&enable, argp, sizeof(enable));
MSM_FB_ERR("[LIVED] LCD force onoff=%d\n", enable);
down(&msm_fb_ioctl_ppp_sem);
msm_fb_blank_sub_force(enable, info, 1);
bl_updated = 1;
up(&msm_fb_ioctl_ppp_sem);
break;
#endif
default:
MSM_FB_INFO("MDP: unknown ioctl (cmd=%x) received!\n", cmd);
ret = -EINVAL;
break;
}
return ret;
}
static int msm_fb_register_driver(void)
{
return platform_driver_register(&msm_fb_driver);
}
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
struct fb_info *msm_fb_get_writeback_fb(void)
{
int c = 0;
for (c = 0; c < fbi_list_index; ++c) {
struct msm_fb_data_type *mfd;
mfd = (struct msm_fb_data_type *)fbi_list[c]->par;
if (mfd->panel.type == WRITEBACK_PANEL)
return fbi_list[c];
}
return NULL;
}
EXPORT_SYMBOL(msm_fb_get_writeback_fb);
int msm_fb_writeback_start(struct fb_info *info)
{
return mdp4_writeback_start(info);
}
EXPORT_SYMBOL(msm_fb_writeback_start);
int msm_fb_writeback_queue_buffer(struct fb_info *info,
struct msmfb_data *data)
{
return mdp4_writeback_queue_buffer(info, data);
}
EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);
int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
struct msmfb_data *data)
{
return mdp4_writeback_dequeue_buffer(info, data);
}
EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);
int msm_fb_writeback_stop(struct fb_info *info)
{
return mdp4_writeback_stop(info);
}
EXPORT_SYMBOL(msm_fb_writeback_stop);
int msm_fb_writeback_init(struct fb_info *info)
{
return mdp4_writeback_init(info);
}
EXPORT_SYMBOL(msm_fb_writeback_init);
int msm_fb_writeback_terminate(struct fb_info *info)
{
return mdp4_writeback_terminate(info);
}
EXPORT_SYMBOL(msm_fb_writeback_terminate);
#endif
struct platform_device *msm_fb_add_device(struct platform_device *pdev)
{
struct msm_fb_panel_data *pdata;
struct platform_device *this_dev = NULL;
struct fb_info *fbi;
struct msm_fb_data_type *mfd = NULL;
u32 type, id, fb_num;
if (!pdev)
return NULL;
id = pdev->id;
pdata = pdev->dev.platform_data;
if (!pdata)
return NULL;
type = pdata->panel_info.type;
#if defined MSM_FB_NUM
/*
* override fb_num defined
* in panel_info
*/
if (type == HDMI_PANEL || type == DTV_PANEL ||
type == TV_PANEL || type == WRITEBACK_PANEL) {
if (hdmi_prim_display)
pdata->panel_info.fb_num = 2;
else
#ifdef CONFIG_F_SKYDISP_HDMI_CAPTION
{
if (type != DTV_PANEL)
pdata->panel_info.fb_num = 1;
#else
pdata->panel_info.fb_num = 1;
#endif
#ifdef CONFIG_F_SKYDISP_HDMI_CAPTION
}
#endif
}
else
pdata->panel_info.fb_num = MSM_FB_NUM;
MSM_FB_INFO("setting pdata->panel_info.fb_num to %d. type: %d\n",
pdata->panel_info.fb_num, type);
#endif
fb_num = pdata->panel_info.fb_num;
if (fb_num <= 0)
return NULL;
if (fbi_list_index >= MAX_FBI_LIST) {
printk(KERN_ERR "msm_fb: no more framebuffer info list!\n");
return NULL;
}
/*
* alloc panel device data
*/
this_dev = msm_fb_device_alloc(pdata, type, id);
if (!this_dev) {
printk(KERN_ERR
"%s: msm_fb_device_alloc failed!\n", __func__);
return NULL;
}
/*
* alloc framebuffer info + par data
*/
fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
if (fbi == NULL) {
platform_device_put(this_dev);
printk(KERN_ERR "msm_fb: can't alloca framebuffer info data!\n");
return NULL;
}
mfd = (struct msm_fb_data_type *)fbi->par;
mfd->key = MFD_KEY;
mfd->fbi = fbi;
mfd->panel.type = type;
mfd->panel.id = id;
mfd->fb_page = fb_num;
mfd->index = fbi_list_index;
mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
mfd->iclient = iclient;
/* link to the latest pdev */
mfd->pdev = this_dev;
mfd_list[mfd_list_index++] = mfd;
fbi_list[fbi_list_index++] = fbi;
/*
* set driver data
*/
platform_set_drvdata(this_dev, mfd);
if (platform_device_add(this_dev)) {
printk(KERN_ERR "msm_fb: platform_device_add failed!\n");
platform_device_put(this_dev);
framebuffer_release(fbi);
fbi_list_index--;
return NULL;
}
return this_dev;
}
EXPORT_SYMBOL(msm_fb_add_device);
int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num,
int subsys_id)
{
struct fb_info *info;
struct msm_fb_data_type *mfd;
if (fb_num > MAX_FBI_LIST ||
(subsys_id != DISPLAY_SUBSYSTEM_ID &&
subsys_id != ROTATOR_SUBSYSTEM_ID)) {
pr_err("%s(): Invalid parameters\n", __func__);
return -1;
}
info = fbi_list[fb_num];
if (!info) {
pr_err("%s(): info is NULL\n", __func__);
return -1;
}
mfd = (struct msm_fb_data_type *)info->par;
if (mfd->map_buffer)
*start = mfd->map_buffer->iova[subsys_id];
else
*start = info->fix.smem_start;
*len = info->fix.smem_len;
return 0;
}
EXPORT_SYMBOL(get_fb_phys_info);
int __init msm_fb_init(void)
{
int rc = -ENODEV;
if (msm_fb_register_driver())
return rc;
#ifdef MSM_FB_ENABLE_DBGFS
{
struct dentry *root;
if ((root = msm_fb_get_debugfs_root()) != NULL) {
msm_fb_debugfs_file_create(root,
"msm_fb_msg_printing_level",
(u32 *) &msm_fb_msg_level);
msm_fb_debugfs_file_create(root,
"mddi_msg_printing_level",
(u32 *) &mddi_msg_level);
msm_fb_debugfs_file_create(root, "msm_fb_debug_enabled",
(u32 *) &msm_fb_debug_enabled);
}
}
#endif
return 0;
}
module_init(msm_fb_init);
| gpl-2.0 |
teamfx/openjfx-8u-dev-tests | functional/ControlsTests/test/javafx/scene/control/test/fxcanvas/FXCanvasTest.java | 13350 | /*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package javafx.scene.control.test.fxcanvas;
import client.test.Keywords;
import client.test.Smoke;
import client.test.ScreenshotCheck;
import test.javaclient.shared.CanvasRunner;
import org.junit.runner.RunWith;
import org.eclipse.swt.SWT;
import org.eclipse.swt.widgets.Event;
import org.jemmy.swt.Shells;
import org.jemmy.swt.lookup.QueueLookup;
import org.jemmy.fx.ByID;
import org.eclipse.swt.graphics.Rectangle;
import org.jemmy.Point;
import javafx.embed.swt.FXCanvas;
import org.eclipse.swt.widgets.Scale;
import org.eclipse.swt.widgets.Shell;
import org.jemmy.interfaces.Selector;
import org.jemmy.action.GetAction;
import org.jemmy.interfaces.Text;
import org.jemmy.fx.control.TextControlWrap;
import org.jemmy.fx.control.CheckBoxWrap;
import org.jemmy.interfaces.Selectable;
import javafx.scene.control.CheckBox;
import javafx.scene.control.TextField;
import javafx.scene.control.Button;
import javafx.scene.Node;
import javafx.scene.Scene;
import javafx.scene.control.test.ControlsTestBase;
import org.jemmy.fx.Root;
import org.jemmy.control.Wrap;
import org.jemmy.interfaces.Keyboard;
import org.jemmy.interfaces.Parent;
import org.jemmy.lookup.LookupCriteria;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.netbeans.jemmy.JemmyProperties;
import static java.lang.Math.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.jemmy.timing.State;
import static org.junit.Assert.*;
import static test.javaclient.shared.JemmyUtils.initJemmy;
import test.javaclient.shared.screenshots.ScreenshotUtils;
@RunWith(CanvasRunner.class)
public class FXCanvasTest extends ControlsTestBase {
static Wrap<? extends Shell> frame;
static Parent frameAsParent;
static Wrap<? extends org.eclipse.swt.widgets.Button> menuBtn;
static Wrap<? extends FXCanvas> fxpane;
static Wrap<? extends Scale> alphaSlider;
Wrap<? extends Scene> scene = null;
Parent<Node> parent = null;
Wrap contentPane;
@BeforeClass
public static void setUpClass() throws Exception {
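// Enable the SWT interoperability mode so the JavaFX content is hosted inside an FXCanvas.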
System.setProperty("javafx.swtinteroperability", "true");
FXCanvasApp.main(null);
JemmyProperties.setCurrentDispatchingModel(JemmyProperties.ROBOT_MODEL_MASK);
frame = Shells.SHELLS.lookup().wrap();
frameAsParent = frame.as(Parent.class);
menuBtn = frameAsParent.lookup(org.eclipse.swt.widgets.Button.class, new ByText(FXCanvasApp.MENU_POPUP_BTN)).wrap();
alphaSlider = frameAsParent.lookup(Scale.class).wrap();
fxpane = frameAsParent.lookup(FXCanvas.class).wrap();
}
@Before
public void setUp() throws InterruptedException {
initJemmy();
JemmyProperties.setCurrentDispatchingModel(JemmyProperties.ROBOT_MODEL_MASK);
frameAsParent.lookup(org.eclipse.swt.widgets.Button.class, new ByText(FXCanvasApp.RESET_BTN)).wrap().mouse().click();
Thread.sleep(500);
scene = findScene(FXCanvasApp.MAIN_CONTAINER_ID);
parent = scene.as(Parent.class, Node.class);
contentPane = parent.lookup(new ByID<Node>(FXCanvasApp.MAIN_CONTAINER_ID)).wrap();
}
@After
public void tearDown() {
}
@ScreenshotCheck
@Smoke
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void heavyPopupTest() throws Throwable {
common(FXCanvasApp.HEAVY_POPUP_CONTAINER_ID, frameAsParent.lookup(org.eclipse.swt.widgets.Button.class, new ByText(FXCanvasApp.HEAVYWEIGHT_POPUP_BTN)).wrap(), "SWTInteropTest-heavyweight");
}
@ScreenshotCheck
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void menuPopupTest() throws Throwable {
common(FXCanvasApp.MENU_POPUP_CONTAINER_ID, menuBtn, "SWTInteropTest-menu");
}
@Smoke
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void mainSceneTest() throws InterruptedException {
checkScene(parent);
}
@ScreenshotCheck
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void transparencyTest() throws Throwable {
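// Set the alpha Scale to its midpoint and compare a screenshot, then restore it to maximum opacity before reporting any screenshot error.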
final Scale scale = alphaSlider.getControl();
final AtomicInteger scaleProp = new AtomicInteger();
scale.getDisplay().syncExec(new Runnable() {
public void run() {
scaleProp.set((scale.getMaximum() - scale.getMinimum()) / 2);
}
});
setScale(scale, scaleProp.get());
checkScreenshot("SWTInteropTest-transparency", contentPane);
scale.getDisplay().syncExec(new Runnable() {
public void run() {
scaleProp.set(scale.getMaximum());
}
});
setScale(scale, scaleProp.get());
throwScreenshotError();
}
@Smoke
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void initialSizesTest() throws InterruptedException {
org.jemmy.Rectangle scene_rect = scene.getScreenBounds();
org.jemmy.Rectangle fxpane_rect = fxpane.getScreenBounds();
assertTrue(fxpane_rect.getWidth() == scene_rect.getWidth());
assertTrue(fxpane_rect.getHeight() == scene_rect.getHeight());
Wrap button = parent.lookup(Button.class, new ByID<Button>(FXCanvasApp.BUTTON_ID)).wrap();
Wrap<? extends TextField> input = parent.lookup(TextField.class, new ByID<TextField>(FXCanvasApp.TEXT_INPUT_ID)).wrap();
org.jemmy.Rectangle button_rect = button.getScreenBounds();
org.jemmy.Rectangle text_box_rect = input.getScreenBounds();
assertFalse(scene_rect.getHeight() < button_rect.getHeight() + text_box_rect.getHeight());
}
@ScreenshotCheck
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void resizingTest() throws InterruptedException {
double right = frame.getScreenBounds().getWidth() - 1;
double bottom = frame.getScreenBounds().getHeight() - 1;
final Rectangle[] rect = new Rectangle[1];
frame.getControl().getDisplay().syncExec(new Runnable() {
public void run() {
rect[0] = frame.getControl().getBounds();
}
});
final Rectangle newRect = new Rectangle(0, 0, rect[0].width, rect[0].height);
frame.getControl().getDisplay().syncExec(new Runnable() {
public void run() {
frame.getControl().setBounds(newRect);
}
});
right -= frame.getScreenBounds().getX();
bottom -= frame.getScreenBounds().getY();
frame.mouse().move(new Point(right, bottom));
frame.mouse().press();
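// Drag the bottom-right corner along a circular arc to exercise continuous resizing of the embedded scene.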
for (double angle = 0; angle < 3; angle += 0.05) {
frame.mouse().move(new Point(right + 200 + 200 * cos(angle), bottom + 200 + 200 * sin(angle)));
Thread.sleep(10);
}
frame.mouse().move(new Point(right, bottom));
frame.mouse().release();
frame.getControl().getDisplay().syncExec(new Runnable() {
public void run() {
frame.getControl().setBounds(rect[0]);
}
});
ScreenshotUtils.checkScreenshot("SWTInteropTest-resizing", contentPane);
}
@Smoke
@Test(timeout = 300000)
@Keywords(keywords = "swt")
public void focusTest() throws InterruptedException {
Wrap<? extends TextField> input = parent.lookup(TextField.class, new ByID<TextField>(FXCanvasApp.TEXT_INPUT_ID)).wrap();
Wrap button = parent.lookup(Button.class, new ByID<Button>(FXCanvasApp.BUTTON_ID)).wrap();
Wrap<? extends CheckBox> check = parent.lookup(CheckBox.class, new ByID<CheckBox>(FXCanvasApp.CHECK_ID)).wrap();
//requestFocused(check);
check.mouse().click();
isFocused(check, true);
frame.keyboard().pushKey(Keyboard.KeyboardButtons.TAB);
isFocused(input, true);
frame.keyboard().pushKey(Keyboard.KeyboardButtons.TAB);
isFocused(button, true);
frame.keyboard().pushKey(Keyboard.KeyboardButtons.TAB);
isFocused(check, true);
frame.keyboard().pushKey(Keyboard.KeyboardButtons.TAB);
final boolean focused[] = new boolean[1];
menuBtn.getControl().getDisplay().syncExec(new Runnable() {
public void run() {
focused[0] = menuBtn.getControl().isFocusControl();
}
});
assertTrue(focused[0]);
}
protected static void setScale(final Scale scale, final int value) {
final Event event = new Event();
event.type = SWT.Selection;
event.widget = scale;
scale.getDisplay().syncExec(new Runnable() {
public void run() {
scale.setSelection(value);
scale.notifyListeners(SWT.Selection, event);
}
});
}
public void common(String scene_id, Wrap<? extends org.eclipse.swt.widgets.Button> button, String name) throws Throwable {
button.mouse().click();
Wrap<? extends Scene> light_popup_scene = findScene(scene_id);
Parent<Node> light_popup_parent = light_popup_scene.as(Parent.class, Node.class);
Wrap content_pane = light_popup_parent.lookup(new ByID<Node>(scene_id)).wrap();
try {
checkScene(light_popup_parent);
ScreenshotUtils.checkScreenshot(name, content_pane);
} catch (Throwable error) {
button.mouse().click();
throw error;
}
button.mouse().click();
}
protected final void isFocused(final Wrap<? extends Node> wrap, final boolean waitedState) {
wrap.waitState(new State() {
public Object reached() {
if (new GetAction<Boolean>() {
@Override
public void run(Object... parameters) {
setResult(wrap.getControl().isFocused());
}
@Override
public String toString() {
return "isFocused() for" + wrap;
}
}.dispatch(wrap.getEnvironment()) == waitedState) {
return true;
} else {
return null;
}
}
});
}
protected final void requestFocused(final Wrap<? extends Node> wrap) {
new GetAction() {
@Override
public void run(Object... parameters) {
wrap.getControl().requestFocus();
}
@Override
public String toString() {
return "requestFocus() for" + wrap;
}
}.dispatch(wrap.getEnvironment());
}
protected final Wrap<? extends Scene> findScene(final String id) {
return Root.ROOT.lookup(new LookupCriteria<Scene>() {
public boolean check(Scene _scene) {
if (_scene.getRoot().getId().contentEquals(id)) {
return true;
}
return false;
}
}).wrap();
}
protected void checkScene(Parent<Node> parent) {
Wrap button = parent.lookup(Button.class, new ByID<Button>(FXCanvasApp.BUTTON_ID)).wrap();
Wrap<? extends CheckBox> check = parent.lookup(CheckBox.class, new ByID<CheckBox>(FXCanvasApp.CHECK_ID)).wrap();
Wrap<? extends TextField> input = parent.lookup(TextField.class, new ByID<TextField>(FXCanvasApp.TEXT_INPUT_ID)).wrap();
Selector selector = check.as(Selectable.class, Boolean.class).selector();
selector.select(Boolean.FALSE);
button.mouse().move();
button.mouse().click();
check.waitProperty(TextControlWrap.SELECTED_PROP_NAME, CheckBoxWrap.State.CHECKED);
Text text = input.as(Text.class);
text.clear();
String str = "Typed text";
text.type(str);
input.waitProperty(Wrap.TEXT_PROP_NAME, str);
}
static class ByText<T extends org.eclipse.swt.widgets.Button> extends QueueLookup<T> {
String text;
public ByText(String text) {
this.text = text;
}
@Override
protected boolean doCheck(T t) {
return t.getText().compareTo(text) == 0;
}
}
}
| gpl-2.0 |
sics-sse/moped | plugins/LEDLighter/src/main/java/plugins/LEDLighter.java | 1710 | package plugins;
import com.sun.squawk.VM;
import java.lang.Math;
import sics.plugin.PlugInComponent;
import sics.port.PluginPPort;
public class LEDLighter extends PlugInComponent {
public PluginPPort led;
private int k;
public LEDLighter(String[] args) {
super(args);
}
public LEDLighter() {
k = 0;
}
public static void main(String[] args) {
// VM.println("LEDLighter.main()\r\n");
LEDLighter ledLighter = new LEDLighter(args);
ledLighter.run();
// VM.println("LEDLighter-main done\r\n");
}
// public void setSpeed(PluginPPort speed) {
// this.speed = speed;
// }
//
// public void setSteering(PluginPPort steering) {
// this.steering = steering;
// }
public void init() {
// Initiate PluginPPort
led = new PluginPPort(this, "led");
VM.println("new pluginpport led");
}
// public void iter(int p, int e) {
// switch(p) {
// case 1:
// VM.println("a");
// break;
// case 2:
// VM.println("b");
// break;
// default:
// VM.println("c");
// }
// }
private void sleep(int ms) {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
VM.println("Interrupted.\r\n");
}
}
public void doFunction2() {
VM.println("2|1");
led.write("2|1");
}
private void setled(String str) {
led.write(str);
// VM.println(str);
sleep(500);
}
public void doFunction() {
int i = 0;
while (true) {
i++;
VM.println("1 cycle " + i);
setled("1|0");
setled("2|0");
setled("3|0");
setled("1|1");
setled("2|1");
setled("3|1");
}
}
public void run() {
init();
doFunction();
}
} | gpl-2.0 |
scriptdev2/scriptdev2 | sql/updates/r3148_scriptdev2.sql | 214 | DELETE FROM script_texts WHERE entry=-1230035;
INSERT INTO script_texts (entry,content_default,sound,type,language,emote,comment) VALUES
(-1230035,'%s cries out an alarm!',0,2,0,0,'general_angerforge EMOTE_ALARM');
| gpl-2.0 |
alexbirkett/GPSBabel | filterdefs.h | 1421 | /*
Filter definitions.
Copyright (C) 2005 Robert Lipe, [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111 USA
*/
/*
* Filters can do some things that modules really shouldn't do.
* This is our (weak) attempt to make that distinction.
*/
extern queue waypt_head;
typedef struct filter_vecs {
filter_init f_init;
filter_process f_process;
filter_deinit f_deinit;
filter_exit f_exit;
arglist_t *args;
} filter_vecs_t;
filter_vecs_t * find_filter_vec(char * const, char **);
void free_filter_vec(filter_vecs_t *);
void disp_filters(int version);
void disp_filter( const char *vecname );
void disp_filter_vec( const char *vecname );
void disp_filter_vecs(void);
void init_filter_vecs(void);
void exit_filter_vecs(void);
| gpl-2.0 |
aosm/gcc_40 | libjava/java/security/acl/AclEntry.java | 5258 | /* AclEntry.java -- An entry in an ACL list.
Copyright (C) 1998 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package java.security.acl;
import java.security.Principal;
import java.util.Enumeration;
/**
* This interface models an entry in an access control list (ACL). Java
* ACL's consist of a list of entries, where each consists of a
* <code>Principal</code> and a list of <code>Permission</code>'s which
* have been granted to that <code>Principal</code>. An ACL can also
* be <em>negative</em>, which indicates that the list of
* <code>Permission</code>'s is a list of permissions that are <em>not</em>
* granted to the <code>Principal</code>. A <code>Principal</code> can
* have at most one regular (or positive) ACL entry and one negative
* ACL entry.
*
* @version 0.0
*
* @author Aaron M. Renn ([email protected])
*/
public interface AclEntry extends Cloneable
{
/**
* This method returns the <code>Principal</code> associated with this
* ACL entry.
*
* @return The <code>Principal</code> for this ACL entry
*/
Principal getPrincipal();
/**
* This method sets the <code>Principal</code> associated with this
* ACL entry. This operation will only succeed if there is not already
* a <code>Principal</code> assigned.
*
* @param user The <code>Principal</code> for this ACL entry
*
* @return <code>true</code> if the <code>Principal</code> was successfully set or <code>false</code> if this entry already has a <code>Principal</code>.
*/
boolean setPrincipal(Principal user);
/**
* This method sets this ACL entry to be a <em>negative</em> entry, indicating
* that it contains a list of permissions that are <em>not</em> granted
* to the entry's <code>Principal</code>. Note that there is no way to
* undo this operation.
*/
void setNegativePermissions();
/**
* This method tests whether or not this ACL entry is a negative entry.
*
* @return <code>true</code> if this ACL entry is negative, <code>false</code> otherwise
*/
boolean isNegative();
/**
* This method adds the specified permission to this ACL entry.
*
* @param permission The <code>Permission</code> to add
*
* @return <code>true</code> if the permission was added or <code>false</code> if it was already set for this entry
*/
boolean addPermission(Permission permission);
/**
* This method removes the specified permission from this ACL entry.
*
* @param perm The <code>Permission</code> to delete from this ACL entry.
*
* @return <code>true</code> if the permission was successfully deleted or <code>false</code> if the permission was not part of this ACL to begin with
*/
boolean removePermission(Permission perm);
/**
* This method tests whether or not the specified permission is associated
* with this ACL entry.
*
* @param permission The <code>Permission</code> to test
*
* @return <code>true</code> if this permission is associated with this entry or <code>false</code> otherwise
*/
boolean checkPermission(Permission permission);
/**
* This method returns a list of all <code>Permission</code> objects
* associated with this ACL entry as an <code>Enumeration</code>.
*
* @return A list of permissions for this ACL entry
*/
Enumeration permissions();
/**
* This method returns this object as a <code>String</code>.
*
* @return A <code>String</code> representation of this object
*/
String toString();
/**
* This method returns a clone of this ACL entry
*
* @return A clone of this ACL entry
*/
Object clone();
}
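/*
 * Illustrative usage sketch (not part of the original GNU Classpath source).
 * It assumes an AclEntry instance obtained from some Acl implementation and
 * pre-existing Principal/Permission objects named "user", "read" and "write";
 * those names are placeholders, not part of this API.
 *
 *   AclEntry entry = ...;                 // provided by an Acl implementation
 *   if (entry.setPrincipal(user)) {
 *       entry.addPermission(read);
 *       if (!entry.checkPermission(write))
 *           System.out.println(user + " has no write permission");
 *       for (Enumeration e = entry.permissions(); e.hasMoreElements(); )
 *           System.out.println(e.nextElement());
 *   }
 */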
| gpl-2.0 |
joglomedia/masedi.net | work/berkeley-db/docs/api_reference/C/env.html | 16383 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Chapter 5. The DB_ENV Handle</title>
<link rel="stylesheet" href="apiReference.css" type="text/css" />
<meta name="generator" content="DocBook XSL Stylesheets V1.73.2" />
<link rel="start" href="index.html" title="Berkeley DB C API Reference" />
<link rel="up" href="index.html" title="Berkeley DB C API Reference" />
<link rel="prev" href="DB_MULTIPLE_RECNO_RESERVE_NEXT.html" title="DB_MULTIPLE_RECNO_RESERVE_NEXT" />
<link rel="next" href="dbgetenv.html" title="DB->get_env()" />
</head>
<body>
<div class="navheader">
<table width="100%" summary="Navigation header">
<tr>
<th colspan="3" align="center">Chapter 5.
The DB_ENV Handle
</th>
</tr>
<tr>
<td width="20%" align="left"><a accesskey="p" href="DB_MULTIPLE_RECNO_RESERVE_NEXT.html">Prev</a> </td>
<th width="60%" align="center"> </th>
<td width="20%" align="right"> <a accesskey="n" href="dbgetenv.html">Next</a></td>
</tr>
</table>
<hr />
</div>
<div class="chapter" lang="en" xml:lang="en">
<div class="titlepage">
<div>
<div>
<h2 class="title"><a id="env"></a>Chapter 5.
The DB_ENV Handle
</h2>
</div>
</div>
</div>
<p>
The <code class="classname">DB_ENV</code> object is the handle for a Berkeley DB environment
— a collection including support for some or all of caching, locking, logging and
transaction subsystems, as well as databases and log files. Methods of the
<code class="classname">DB_ENV</code> handle are used to configure the environment as well as
to operate on subsystems and databases in the environment.
</p>
<p>
<code class="classname">DB_ENV</code> handles
<span>
are created using the <a class="xref" href="envcreate.html" title="db_env_create">db_env_create</a>
method, and
</span>
are opened using the <a class="xref" href="envopen.html" title="DB_ENV->open()">DB_ENV->open()</a>
method.
</p>
<p>
When you are done using your environment, close it using the
<a class="xref" href="envclose.html" title="DB_ENV->close()">DB_ENV->close()</a> method. Before closing your
environment, make sure all open database handles are closed first. See the
<a class="xref" href="dbclose.html" title="DB->close()">DB->close()</a> method for more information.
</p>
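<p>
      The following fragment is a minimal sketch of the create/open/close lifecycle
      described above; it is not part of the original reference text, and the home
      directory <code class="literal">/var/dbenv</code> and the flag combination are
      placeholder choices for illustration only.
    </p>
<pre class="programlisting">#include &lt;db.h&gt;

DB_ENV *dbenv;
int ret;

/* Create the environment handle. */
if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
    return (ret);

/* Open the environment; home directory and flags are illustrative. */
if ((ret = dbenv->open(dbenv, "/var/dbenv",
    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
    0)) != 0) {
    dbenv->err(dbenv, ret, "DB_ENV->open");
    (void)dbenv->close(dbenv, 0);
    return (ret);
}

/* ... use the environment; close all DB handles before the environment ... */

/* Close the environment. */
if ((ret = dbenv->close(dbenv, 0)) != 0)
    return (ret);</pre>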
<div class="sect1" lang="en" xml:lang="en">
<div class="titlepage">
<div>
<div>
<h2 class="title" style="clear: both"><a id="envlist"></a>Database Environments and Related Methods</h2>
</div>
</div>
</div>
<div class="navtable">
<table border="1" width="80%">
<thead>
<tr>
<th>Database Environment Operations</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a class="xref" href="dbgetenv.html" title="DB->get_env()">DB->get_env()</a>
</td>
<td>Return the DB's underlying DB_ENV handle </td>
</tr>
<tr>
<td>
<a class="xref" href="envclose.html" title="DB_ENV->close()">DB_ENV->close()</a>
</td>
<td>Close an environment</td>
</tr>
<tr>
<td>
<a class="xref" href="envcreate.html" title="db_env_create">db_env_create</a>
</td>
<td>Create an environment handle</td>
</tr>
<tr>
<td>
<a class="xref" href="envdbremove.html" title="DB_ENV->dbremove()">DB_ENV->dbremove()</a>
</td>
<td>Remove a database</td>
</tr>
<tr>
<td>
<a class="xref" href="envdbrename.html" title="DB_ENV->dbrename()">DB_ENV->dbrename()</a>
</td>
<td>Rename a database</td>
</tr>
<tr>
<td>
<a class="xref" href="enverr.html" title="DB_ENV->err()">DB_ENV->err()</a>
</td>
<td>Error message</td>
</tr>
<tr>
<td>
<a class="xref" href="envfailchk.html" title="DB_ENV->failchk()">DB_ENV->failchk()</a>
</td>
<td>Check for thread failure</td>
</tr>
<tr>
<td>
<a class="xref" href="envfileid_reset.html" title="DB_ENV->fileid_reset()">DB_ENV->fileid_reset()</a>
</td>
<td>Reset database file IDs</td>
</tr>
<tr>
<td>
<a class="xref" href="envfullversion.html" title="db_full_version">db_full_version</a>
</td>
<td>Return full version information</td>
</tr>
<tr>
<td>
<a class="xref" href="envget_home.html" title="DB_ENV->get_home()">DB_ENV->get_home()</a>
</td>
<td>Return environment's home directory</td>
</tr>
<tr>
<td>
<a class="xref" href="envget_open_flags.html" title="DB_ENV->get_open_flags()">DB_ENV->get_open_flags()</a>
</td>
<td>Return flags with which the environment was opened</td>
</tr>
<tr>
<td>
<a class="xref" href="envlog_verify.html" title="DB_ENV->log_verify()">DB_ENV->log_verify()</a>
</td>
<td>Verify log files of an environment.</td>
</tr>
<tr>
<td>
<a class="xref" href="envlsn_reset.html" title="DB_ENV->lsn_reset()">DB_ENV->lsn_reset()</a>
</td>
<td>Reset database file LSNs</td>
</tr>
<tr>
<td>
<a class="xref" href="envopen.html" title="DB_ENV->open()">DB_ENV->open()</a>
</td>
<td>Open an environment</td>
</tr>
<tr>
<td>
<a class="xref" href="envremove.html" title="DB_ENV->remove()">DB_ENV->remove()</a>
</td>
<td>Remove an environment</td>
</tr>
<tr>
<td>
<a class="xref" href="envstat.html" title="DB_ENV->stat_print()">DB_ENV->stat_print()</a>
</td>
<td>Environment statistics</td>
</tr>
<tr>
<td>
<a class="xref" href="envstrerror.html" title="db_strerror">db_strerror</a>
</td>
<td>Error strings</td>
</tr>
<tr>
<td>
<a class="xref" href="envversion.html" title="db_version">db_version</a>
</td>
<td>Return version information</td>
</tr>
<tr>
<td colspan="2">
<span class="bold">
<strong>Environment Configuration</strong>
</span>
</td>
</tr>
<tr>
<td>
<a class="xref" href="envadd_data_dir.html" title="DB_ENV->add_data_dir()">DB_ENV->add_data_dir()</a>
</td>
<td>Add an environment data directory</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_alloc.html" title="DB_ENV->set_alloc()">DB_ENV->set_alloc()</a>
</td>
<td>Set local space allocation functions</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_app_dispatch.html" title="DB_ENV->set_app_dispatch()">DB_ENV->set_app_dispatch()</a>
</td>
<td>Configure application recovery callback</td>
</tr>
<tr>
<td><a class="xref" href="envset_data_dir.html" title="DB_ENV->set_data_dir()">DB_ENV->set_data_dir()</a>, <a class="xref" href="envget_data_dirs.html" title="DB_ENV->get_data_dirs()">DB_ENV->get_data_dirs()</a></td>
<td>Set/get the environment data directory</td>
</tr>
<tr>
<td><a class="xref" href="envset_create_dir.html" title="DB_ENV->set_create_dir()">DB_ENV->set_create_dir()</a>, <a class="xref" href="envget_create_dir.html" title="DB_ENV->get_create_dir()">DB_ENV->get_create_dir()</a></td>
<td>Set/get the environment create directory</td>
</tr>
<tr>
<td><a class="xref" href="envset_encrypt.html" title="DB_ENV->set_encrypt()">DB_ENV->set_encrypt()</a>, <a class="xref" href="envget_encrypt_flags.html" title="DB_ENV->get_encrypt_flags()">DB_ENV->get_encrypt_flags()</a></td>
<td>Set/get the environment cryptographic key</td>
</tr>
<tr>
<td>
<a class="xref" href="envevent_notify.html" title="DB_ENV->set_event_notify()">DB_ENV->set_event_notify()</a>
</td>
<td>Set event notification callback</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_errcall.html" title="DB_ENV->set_errcall()">DB_ENV->set_errcall()</a>
</td>
<td>Set error message callbacks</td>
</tr>
<tr>
<td><a class="xref" href="envset_errfile.html" title="DB_ENV->set_errfile()">DB_ENV->set_errfile()</a>, <a class="xref" href="envget_errfile.html" title="DB_ENV->get_errfile()">DB_ENV->get_errfile()</a></td>
<td>Set/get error message FILE</td>
</tr>
<tr>
<td><a class="xref" href="envset_errpfx.html" title="DB_ENV->set_errpfx()">DB_ENV->set_errpfx()</a>, <a class="xref" href="envget_errpfx.html" title="DB_ENV->get_errpfx()">DB_ENV->get_errpfx()</a></td>
<td>Set/get error message prefix</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_feedback.html" title="DB_ENV->set_feedback()">DB_ENV->set_feedback()</a>
</td>
<td>Set feedback callback</td>
</tr>
<tr>
<td><a class="xref" href="envset_flags.html" title="DB_ENV->set_flags()">DB_ENV->set_flags()</a>, <a class="xref" href="envget_flags.html" title="DB_ENV->get_flags()">DB_ENV->get_flags()</a></td>
<td>Environment configuration</td>
</tr>
<tr>
<td><a class="xref" href="envset_intermediate_dir_mode.html" title="DB_ENV->set_intermediate_dir_mode()">DB_ENV->set_intermediate_dir_mode()</a>, <a class="xref" href="envget_intermediate_dir_mode.html" title="DB_ENV->get_intermediate_dir_mode()">DB_ENV->get_intermediate_dir_mode()</a></td>
<td>Set/get intermediate directory creation mode</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_isalive.html" title="DB_ENV->set_isalive()">DB_ENV->set_isalive()</a>
</td>
<td>Set thread is-alive callback</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_msgcall.html" title="DB_ENV->set_msgcall()">DB_ENV->set_msgcall()</a>
</td>
<td>Set informational message callback</td>
</tr>
<tr>
<td><a class="xref" href="envset_msgfile.html" title="DB_ENV->set_msgfile()">DB_ENV->set_msgfile()</a>, <a class="xref" href="envget_msgfile.html" title="DB_ENV->get_msgfile()">DB_ENV->get_msgfile()</a></td>
<td>Set/get informational message FILE</td>
</tr>
<tr>
<td><a class="xref" href="envset_shm_key.html" title="DB_ENV->set_shm_key()">DB_ENV->set_shm_key()</a>, <a class="xref" href="envget_shm_key.html" title="DB_ENV->get_shm_key()">DB_ENV->get_shm_key()</a></td>
<td>Set/get system memory shared segment ID</td>
</tr>
<tr>
<td><a class="xref" href="envset_thread_count.html" title="DB_ENV->set_thread_count()">DB_ENV->set_thread_count()</a>, <a class="xref" href="envget_thread_count.html" title="DB_ENV->get_thread_count()">DB_ENV->get_thread_count()</a></td>
<td>Set/get approximate thread count</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_thread_id.html" title="DB_ENV->set_thread_id()">DB_ENV->set_thread_id()</a>
</td>
<td>Set thread of control ID function</td>
</tr>
<tr>
<td>
<a class="xref" href="envset_thread_id_string.html" title="DB_ENV->set_thread_id_string()">DB_ENV->set_thread_id_string()</a>
</td>
<td>Set thread of control ID format function</td>
</tr>
<tr>
<td><a class="xref" href="envset_timeout.html" title="DB_ENV->set_timeout()">DB_ENV->set_timeout()</a>, <a class="xref" href="envget_timeout.html" title="DB_ENV->get_timeout()">DB_ENV->get_timeout()</a></td>
<td>Set/get lock and transaction timeout</td>
</tr>
<tr>
<td><a class="xref" href="envset_tmp_dir.html" title="DB_ENV->set_tmp_dir()">DB_ENV->set_tmp_dir()</a>, <a class="xref" href="envget_tmp_dir.html" title="DB_ENV->get_tmp_dir()">DB_ENV->get_tmp_dir()</a></td>
<td>Set/get the environment temporary file directory</td>
</tr>
<tr>
<td><a class="xref" href="envset_verbose.html" title="DB_ENV->set_verbose()">DB_ENV->set_verbose()</a>, <a class="xref" href="envget_verbose.html" title="DB_ENV->get_verbose()">DB_ENV->get_verbose()</a></td>
<td>Set/get verbose messages</td>
</tr>
<tr>
<td><a class="xref" href="envset_cachesize.html" title="DB_ENV->set_cachesize()">DB_ENV->set_cachesize()</a>, <a class="xref" href="envget_cachesize.html" title="DB_ENV->get_cachesize()">DB_ENV->get_cachesize()</a></td>
<td>Set/get the environment cache size</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<div class="navfooter">
<hr />
<table width="100%" summary="Navigation footer">
<tr>
<td width="40%" align="left"><a accesskey="p" href="DB_MULTIPLE_RECNO_RESERVE_NEXT.html">Prev</a> </td>
<td width="20%" align="center"> </td>
<td width="40%" align="right"> <a accesskey="n" href="dbgetenv.html">Next</a></td>
</tr>
<tr>
<td width="40%" align="left" valign="top">DB_MULTIPLE_RECNO_RESERVE_NEXT </td>
<td width="20%" align="center">
<a accesskey="h" href="index.html">Home</a>
</td>
<td width="40%" align="right" valign="top"> DB->get_env()</td>
</tr>
</table>
</div>
</body>
</html>
| gpl-2.0 |
butkevicius/motorola-moto-z-permissive-kernel | kernel/include/soc/qcom/camera2.h | 5112 | /* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __CAMERA2_H__
#define __CAMERA2_H__
#include <media/msm_cam_sensor.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of.h>
enum msm_camera_device_type_t {
MSM_CAMERA_I2C_DEVICE,
MSM_CAMERA_PLATFORM_DEVICE,
MSM_CAMERA_SPI_DEVICE,
};
enum msm_bus_perf_setting {
S_INIT,
S_PREVIEW,
S_VIDEO,
S_CAPTURE,
S_ZSL,
S_STEREO_VIDEO,
S_STEREO_CAPTURE,
S_DEFAULT,
S_LIVESHOT,
S_DUAL,
S_EXIT
};
struct msm_camera_slave_info {
uint16_t sensor_slave_addr;
uint16_t sensor_id_reg_addr;
uint16_t sensor_id;
uint16_t sensor_id2;
uint16_t sensor_id_mask;
};
struct msm_cam_clk_info {
const char *clk_name;
long clk_rate;
uint32_t delay;
};
struct msm_pinctrl_info {
struct pinctrl *pinctrl;
struct pinctrl_state *gpio_state_active;
struct pinctrl_state *gpio_state_suspend;
bool use_pinctrl;
};
struct msm_cam_clk_setting {
struct msm_cam_clk_info *clk_info;
uint16_t num_clk_info;
uint8_t enable;
};
struct v4l2_subdev_info {
enum v4l2_mbus_pixelcode code;
enum v4l2_colorspace colorspace;
uint16_t fmt;
uint16_t order;
};
struct msm_camera_gpio_num_info {
uint16_t gpio_num[SENSOR_GPIO_MAX];
uint8_t valid[SENSOR_GPIO_MAX];
};
struct msm_camera_gpio_conf {
void *cam_gpiomux_conf_tbl;
uint8_t cam_gpiomux_conf_tbl_size;
struct gpio *cam_gpio_common_tbl;
uint8_t cam_gpio_common_tbl_size;
struct gpio *cam_gpio_req_tbl;
uint8_t cam_gpio_req_tbl_size;
uint32_t gpio_no_mux;
uint32_t *camera_off_table;
uint8_t camera_off_table_size;
uint32_t *camera_on_table;
uint8_t camera_on_table_size;
struct msm_camera_gpio_num_info *gpio_num_info;
};
struct msm_camera_power_ctrl_t {
struct device *dev;
struct msm_sensor_power_setting *power_setting;
uint16_t power_setting_size;
struct msm_sensor_power_setting *power_down_setting;
uint16_t power_down_setting_size;
struct msm_camera_gpio_conf *gpio_conf;
struct camera_vreg_t *cam_vreg;
int num_vreg;
struct msm_camera_i2c_conf *i2c_conf;
struct clk **clk_ptr;
struct msm_cam_clk_info *clk_info;
struct msm_pinctrl_info pinctrl_info;
uint8_t cam_pinctrl_status;
size_t clk_info_size;
};
enum msm_camera_actuator_name {
MSM_ACTUATOR_MAIN_CAM_0,
MSM_ACTUATOR_MAIN_CAM_1,
MSM_ACTUATOR_MAIN_CAM_2,
MSM_ACTUATOR_MAIN_CAM_3,
MSM_ACTUATOR_MAIN_CAM_4,
MSM_ACTUATOR_MAIN_CAM_5,
MSM_ACTUATOR_WEB_CAM_0,
MSM_ACTUATOR_WEB_CAM_1,
MSM_ACTUATOR_WEB_CAM_2,
};
struct msm_actuator_info {
struct i2c_board_info const *board_info;
enum msm_camera_actuator_name cam_name;
int bus_id;
int vcm_pwd;
int vcm_enable;
};
enum msm_camera_i2c_mux_mode {
MODE_R,
MODE_L,
MODE_DUAL
};
struct msm_camera_i2c_conf {
uint8_t use_i2c_mux;
struct platform_device *mux_dev;
enum msm_camera_i2c_mux_mode i2c_mux_mode;
};
struct msm_camera_sensor_board_info {
const char *sensor_name;
const char *eeprom_name;
const char *actuator_name;
const char *ois_name;
struct msm_camera_slave_info *slave_info;
struct msm_camera_csi_lane_params *csi_lane_params;
struct msm_camera_sensor_strobe_flash_data *strobe_flash_data;
struct msm_actuator_info *actuator_info;
struct msm_sensor_info_t *sensor_info;
const char *misc_regulator;
struct msm_camera_power_ctrl_t power_info;
struct msm_camera_sensor_slave_info *cam_slave_info;
};
enum msm_camera_i2c_cmd_type {
MSM_CAMERA_I2C_CMD_WRITE,
MSM_CAMERA_I2C_CMD_POLL,
};
struct msm_camera_i2c_reg_conf {
uint16_t reg_addr;
uint16_t reg_data;
enum msm_camera_i2c_data_type dt;
enum msm_camera_i2c_cmd_type cmd_type;
int16_t mask;
};
struct msm_camera_i2c_conf_array {
struct msm_camera_i2c_reg_conf *conf;
uint16_t size;
uint16_t delay;
enum msm_camera_i2c_data_type data_type;
};
struct eeprom_map_t {
uint32_t valid_size;
uint32_t addr;
uint32_t addr_t;
uint32_t data;
uint32_t data_t;
uint32_t delay;
};
struct eeprom_slave_add_t {
uint32_t addr;
};
struct msm_eeprom_memory_map_t {
struct eeprom_map_t page;
struct eeprom_map_t pageen;
struct eeprom_map_t poll;
struct eeprom_map_t mem;
struct eeprom_slave_add_t saddr;
};
struct msm_eeprom_memory_block_t {
struct msm_eeprom_memory_map_t *map;
uint32_t num_map; /* number of map blocks */
uint8_t *mapdata;
uint32_t num_data; /* size of total mapdata */
};
struct msm_eeprom_cmm_t {
uint32_t cmm_support;
uint32_t cmm_compression;
uint32_t cmm_offset;
uint32_t cmm_size;
};
struct msm_eeprom_board_info {
const char *eeprom_name;
uint16_t i2c_slaveaddr;
struct msm_camera_power_ctrl_t power_info;
struct msm_eeprom_cmm_t cmm_data;
enum i2c_freq_mode_t i2c_freq_mode;
};
#endif
| gpl-2.0 |
Argalep/ServUO | Scripts/Items/Consumables/AnimatedWeaponScroll.cs | 752 | namespace Server.Items
{
public class AnimatedWeaponScroll : SpellScroll
{
[Constructable]
public AnimatedWeaponScroll()
: this(1)
{
}
[Constructable]
public AnimatedWeaponScroll(int amount)
: base(683, 0x2DA4, amount)
{
}
public AnimatedWeaponScroll(Serial serial)
: base(serial)
{
}
public override void Serialize(GenericWriter writer)
{
base.Serialize(writer);
writer.Write(0); // version
}
public override void Deserialize(GenericReader reader)
{
base.Deserialize(reader);
int version = reader.ReadInt();
}
}
} | gpl-2.0 |
santiavenda2/griffith | lib/plugins/movie/PluginMovieOFDb.py | 12757 | # -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Written by Christian Sagmueller <[email protected]>
# based on PluginMovieIMDB.py, Copyright (c) 2005 Vasco Nunes
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gutils
import movie,string,re
plugin_name = "OFDb"
plugin_description = "Online-Filmdatenbank"
plugin_url = "www.ofdb.de"
plugin_language = _("German")
plugin_author = "Christian Sagmueller, Jessica Katharina Parth"
plugin_author_email = "[email protected]"
plugin_version = "0.11"
class Plugin(movie.Movie):
def __init__(self, id):
self.encode = 'utf-8'
self.movie_id = id
self.url = "http://www.ofdb.de/%s" % str(self.movie_id)
def initialize(self):
# OFDb didn't provide the runtime, studio and classification but it provide a link to the german imdb entry
# lets use the imdb page, why not
imdb_nr = gutils.trim(self.page, 'http://german.imdb.com/Title?', '"')
if imdb_nr != '':
self.imdb_page = self.open_page(url='http://www.imdb.de/Title?' + imdb_nr)
else:
imdb_nr = gutils.trim(self.page, 'http://www.imdb.com/Title?', '"')
if imdb_nr != '':
self.imdb_page = self.open_page(url='http://www.imdb.de/Title?' + imdb_nr)
else:
self.imdb_page = ''
def get_image(self):
self.image_url = "http://img.ofdb.de/film/" + gutils.trim(self.page, 'img src="http://img.ofdb.de/film/', '"' )
def get_o_title(self):
self.o_title = gutils.clean(gutils.trim(self.page, 'Originaltitel:', '</tr>'))
if self.o_title == '':
self.o_title = string.replace(self.o_title, ' ', '' )
def get_title(self):
self.title = gutils.trim(self.page,'size="3"><b>','<')
def get_director(self):
self.director = gutils.trim(self.page,"Regie:","</a><br>")
def get_plot(self):
self.plot = ''
storyid = gutils.regextrim(self.page, '<a href="plot/', '(">|[&])')
if storyid is not None:
story_page = self.open_page(url="http://www.ofdb.de/plot/%s" % (storyid.encode('utf8')))
if story_page:
self.plot = gutils.trim(story_page, "</b><br><br>","</")
def get_year(self):
self.year = gutils.trim(self.page,"Erscheinungsjahr:","</a>")
self.year = gutils.strip_tags(self.year)
def get_runtime(self):
# from imdb
self.runtime = gutils.after(gutils.regextrim(self.imdb_page, 'itemprop="duration"', ' (min|Min)'), '>')
def get_genre(self):
self.genre = gutils.trim(self.page,"Genre(s):","</table>")
self.genre = string.replace(self.genre, "<br>", ", ")
self.genre = gutils.strip_tags(self.genre)
self.genre = string.replace(self.genre, "/", ", ")
self.genre = gutils.clean(self.genre)
self.genre = self.genre[0:-1]
def get_cast(self):
self.cast = ''
movie_id_elements = string.split(self.movie_id, ',')
movie_id_elements[0] = string.replace(movie_id_elements[0], "film/", "")
cast_page = self.open_page(url="http://www.ofdb.de/view.php?page=film_detail&fid=%s" % str(movie_id_elements[0]) )
self.cast = gutils.trim(cast_page, 'Darsteller</i>', '</table>')
self.cast = re.sub('(\n|\t| )', '', self.cast)
self.cast = string.replace(self.cast, '\t', '')
self.cast = string.replace(self.cast, 'class="Daten">', '>\n')
self.cast = string.strip(gutils.strip_tags(self.cast))
self.cast = string.replace(self.cast, '... ', _(' as '))
self.cast = gutils.clean(self.cast)
def get_classification(self):
# from imdb
self.classification = gutils.regextrim(gutils.regextrim(self.imdb_page, '(Altersfreigabe|Certification):', '</div>'), '(Deutschland|Germany):', '(&|[|])')
def get_studio(self):
# from imdb
self.studio = gutils.regextrim(self.imdb_page, 'Production Co:', '(<span|</span>)')
def get_o_site(self):
self.o_site = gutils.trim(gutils.regextrim(self.imdb_page, 'Official Sites:', '(<span|</span>)'), 'href="', '"')
def get_site(self):
self.site = self.url
def get_trailer(self):
self.trailer = ""
def get_country(self):
self.country = gutils.trim(self.page,"Herstellungsland:","</a>")
def get_rating(self):
self.rating = gutils.trim(self.page,"<br>Note: "," ")
if self.rating == '':
self.rating = "0"
self.rating = str(round(float(self.rating)))
class SearchPlugin(movie.SearchMovie):
def __init__(self):
self.original_url_search = "http://www.ofdb.de/view.php?page=suchergebnis&Kat=OTitel&SText="
self.translated_url_search = "http://www.ofdb.de/view.php?page=suchergebnis&Kat=DTitel&SText="
self.encode = 'utf-8'
self.remove_accents = False
def search(self,parent_window):
if not self.open_search(parent_window):
return None
self.page = gutils.trim(self.page,"</b><br><br>", "<br><br><br>");
self.page = string.replace( self.page, "'", '"' )
self.page = string.replace( self.page, '<font size="1">', '' )
self.page = string.replace( self.page, '</font>', '' )
return self.page
def get_searches(self):
elements = string.split(self.page,"<br>")
if (elements[0] != ''):
for element in elements:
elementid = gutils.trim(element,'<a href="','"')
if elementid is not None and elementid != '':
self.ids.append(elementid)
elementname = gutils.clean(element)
p1 = string.find(elementname, '>')
if p1 == -1:
self.titles.append(elementname)
else:
self.titles.append(elementname[p1+1:])
#
# Plugin Test
#
class SearchPluginTest(SearchPlugin):
#
# Configuration for automated tests:
# dict { movie_id -> [ expected result count for original url, expected result count for translated url ] }
#
test_configuration = {
'Rocky Balboa' : [ 1, 1 ],
'Arahan' : [ 3, 2 ],
'glückliches' : [ 4, 2 ]
}
class PluginTest:
#
# Configuration for automated tests:
# dict { movie_id -> dict { arribute -> value } }
#
# value: * True/False if attribute only should be tested for any value
# * or the expected value
#
test_configuration = {
'film/103013,Rocky%20Balboa' : {
'title' : 'Rocky Balboa',
'o_title' : 'Rocky Balboa',
'director' : 'Sylvester Stallone',
'plot' : True,
'cast' : 'Sylvester Stallone' + _(' as ') + 'Rocky Balboa\n\
Burt Young' + _(' as ') + 'Paulie\n\
Antonio Tarver' + _(' as ') + 'Mason \'The Line\' Dixon\n\
Geraldine Hughes' + _(' as ') + 'Marie\n\
Milo Ventimiglia' + _(' as ') + 'Robert Jr.\n\
Tony Burton' + _(' as ') + 'Duke\n\
A.J. Benza' + _(' as ') + 'L.C.\n\
James Francis Kelly III' + _(' as ') + 'Steps\n\
Lou DiBella' + _(' as ') + 'Himself\n\
Mike Tyson' + _(' as ') + 'Himself\n\
Henry G. Sanders' + _(' as ') + 'Martin\n\
Pedro Lovell' + _(' as ') + 'Spider Rico\n\
Ana Gerena' + _(' as ') + 'Isabel\n\
Angela Boyd' + _(' as ') + 'Angie\n\
Louis Giansante\n\
Maureen Schilling\n\
Lahmard J. Tate\n\
Woody Paige\n\
Skip Bayless\n\
Jay Crawford\n\
Brian Kenny\n\
Dana Jacobson\n\
Charles Johnson\n\
James Binns\n\
Johnnie Hobbs Jr.\n\
Barney Fitzpatrick\n\
Jim Lampley\n\
Larry Merchant\n\
Max Kellerman\n\
LeRoy Neiman\n\
Bert Randolph Sugar\n\
Bernard Fernández\n\
Gunnar Peterson\n\
Yahya\n\
Marc Ratner\n\
Anthony Lato Jr.\n\
Jack Lazzarado\n\
Michael Buffer' + _(' as ') + 'Ring Announcer\n\
Joe Cortez' + _(' as ') + 'Referee\n\
Carter Mitchell\n\
Vinod Kumar\n\
Fran Pultro\n\
Frank Stallone als Frank Stallone Jr.' + _(' as ') + 'Dinner Patron \n\
Jody Giambelluca\n\
Tobias Segal' + _(' as ') + 'Robert\'s Friend\n\
Tim Carr' + _(' as ') + 'Robert\'s Friend \n\
Matt Frack\n\
Paul Dion Monte' + _(' as ') + 'Robert\'s Friend\n\
Kevin King Templeton\n\
Robert Michael Kelly\n\
Rick Buchborn\n\
Nick Baker\n\
Don Sherman' + _(' as ') + 'Andy\n\
Gary Compton\n\
Vale Anoai\n\
Sikander Malik\n\
Michael Ahl\n\
Andrew Aninsman\n\
Ben Bachelder\n\
Lacy Bevis\n\
Tim Brooks\n\
D.T. Carney\n\
Ricky Cavazos' + _(' as ') + 'Boxing Spectator (uncredited)\n\
Rennie Cowan\n\
Deon Derrico\n\
Jacob \'Stitch\' Duran\n\
Simon P. Edwards\n\
Ruben Fischman' + _(' as ') + 'High-Roller in Las Vegas (uncredited)\n\
David Gere\n\
Noah Jacobs\n\
Mark J. Kilbane\n\
Zach Klinefelter\n\
David Kneeream\n\
Dan Montero\n\
Keith Moyer' + _(' as ') + 'Bar Patron (uncredited)\n\
Carol Anne Mueller\n\
Jacqueline Olivia\n\
Brian H. Scott\n\
Keyon Smith\n\
Frank Traynor\n\
Ryan Tygh\n\
Kimberly Villanova',
'country' : 'USA',
'genre' : 'Action, Drama, Sportfilm',
'classification' : False,
'studio' : 'Metro-Goldwyn-Mayer (MGM), Columbia Pictures, Revolution Studios',
'o_site' : False,
'site' : 'http://www.ofdb.de/film/103013,Rocky%20Balboa',
'trailer' : False,
'year' : 2006,
'notes' : False,
'runtime' : 102,
'image' : True,
'rating' : 8
},
'film/22489,Ein-Gl%C3%BCckliches-Jahr' : {
'title' : 'Glückliches Jahr, Ein',
'o_title' : 'Bonne année, La',
'director' : 'Claude Lelouch',
'plot' : False,
'cast' : 'Lino Ventura' + _(' as ') + 'Simon\n\
Françoise Fabian' + _(' as ') + 'Françoise\n\
Charles Gérard' + _(' as ') + 'Charlot\n\
André Falcon' + _(' as ') + 'Le bijoutier\n\
Mireille Mathieu\n\
Lilo\n\
Claude Mann\n\
Frédéric de Pasquale\n\
Gérard Sire\n\
Silvano Tranquilli' + _(' as ') + 'L\'amant italien\n\
André Barello\n\
Michel Bertay\n\
Norman de la Chesnaye\n\
Pierre Edeline\n\
Pierre Pontiche\n\
Michou\n\
Bettina Rheims\n\
Joseph Rythmann\n\
Georges Staquet\n\
Jacques Villedieu\n\
Harry Walter\n\
Elie Chouraqui',
'country' : 'Frankreich',
'genre' : 'Komödie, Krimi',
'classification' : False,
'studio' : 'Les Films 13, Rizzoli Film',
'o_site' : False,
'site' : 'http://www.ofdb.de/film/22489,Ein-Gl%C3%BCckliches-Jahr',
'trailer' : False,
'year' : 1973,
'notes' : False,
'runtime' : 115,
'image' : True,
'rating' : 6
},
'film/54088,Arahan' : {
'title' : 'Arahan',
'o_title' : 'Arahan jangpung daejakjeon',
'director' : 'Ryoo Seung-wan',
'plot' : True,
'cast' : 'Ryoo Seung-beom\n\
Yoon Soy' + _(' as ') + 'Wi-jin\n\
Ahn Seong-gi' + _(' as ') + 'Ja-woon\n\
Jeong Doo-hong' + _(' as ') + 'Heuk-Woon\n\
Yoon Joo-sang' + _(' as ') + 'Moo-woon \n\
Kim Ji-yeong\n\
Baek Chan-gi\n\
Kim Jae-man\n\
Lee Dae-yeon\n\
Kim Dong-ju\n\
Kim Su-hyeon\n\
Geum Dong-hyeon\n\
Lee Jae-goo\n\
Ahn Kil-kang\n\
Bong Tae-gyu' + _(' as ') + 'Cameo\n\
Im Ha-ryong' + _(' as ') + 'Cameo\n\
Yoon Do-hyeon\n\
Lee Choon-yeon' + _(' as ') + 'Cameo\n\
Kim Yeong-in\n\
Park Yoon-bae\n\
Lee Won\n\
Kim Kyeong-ae\n\
Yoo Soon-cheol\n\
Hwang Hyo-eun\n\
Lee Jae-ho\n\
Yang Ik-joon\n\
Kwon Beom-taek\n\
Min Hye-ryeong\n\
Oh Soon-tae\n\
Lee Oi-soo',
'country' : 'Südkorea',
'genre' : 'Action, Fantasy, Komödie',
'classification' : False,
'studio' : 'Fun and Happiness, Good Movie Company',
'o_site' : 'http://www.arahan.co.kr/',
'site' : 'http://www.ofdb.de/film/54088,Arahan',
'trailer' : False,
'year' : 2004,
'notes' : False,
'runtime' : 114,
'image' : True,
'rating' : 7
}
}
| gpl-2.0 |
gsnerf/MediaBrowser | MediaBrowser.WebDashboard/dashboard-ui/bower_components/emby-webcomponents/filedownloader.js | 251 | define(['multi-download'], function (multiDownload) {
'use strict';
return {
download: function (items) {
multiDownload(items.map(function (item) {
return item.url;
}));
}
};
}); | gpl-2.0 |
jbelborja/porfinyaerahora | tmp/com_proforms_basic_v_1.5.5_patched/administrator/components/com_proforms/css/admin.responsivewhizzard.css | 17597 | div.whizzardWrap{
display:block;
width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
background-color: #eee;
counter-reset:section;
}
div.responsiveWhizzard{
position: relative;
display:block;
width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
min-height: 400px;
padding:0; margin:0;
margin-bottom: 20px;
}
div.responsiveWhizzard div.pattern{
position: absolute;
top:0; left:0;
display:block;
width: 100%; height: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
border: none;
padding:0; margin:0;
background-color: #eee;
}
div.responsiveWhizzard div.pattern div.light{
position: absolute;;
display:inline-block;
float:left;
width: 2.43478260869%;
height: 100%;
background-color: #fff;
box-sizing: border-box;
-moz-box-sizing: border-box;
top: 0; right:0;
}
div.responsiveWhizzard div.pattern div.dark{
position: relative;
float: left;
width: 2.690217391%; height: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
border: none;
padding:0; margin:0;
margin-right: 1.43478260869%;
background-color: #ddd;
}
div.responsiveWhizzard div.layoutContainer{
position: relative;
display:block;
width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
border: none;
padding:0; margin:0;
background-color: transparent;
z-index: 10;
padding-bottom: 80px;
}
div.responsiveWhizzard div.layoutContainer div._row{
position: relative;
display: block;
min-height: 80px;
background-color: transparent;
border: none;
margin:0; padding:0;
box-sizing: border-box;
-moz-box-sizing: border-box;
width: 100%;
clear:both;
margin-bottom: 32px;
border-top: 1px solid #CCCCCC;
border-bottom: 1px solid #CCCCCC;
box-shadow: 0 1px 1px rgba(0, 0, 0, 0.075) inset;
transition: border 0.2s linear 0s, box-shadow 0.2s linear 0s
}
div.responsiveWhizzard div.layoutContainer div._row:hover{
border-top-color: rgba(82, 168, 236, 0.8);
border-bottom-color: rgba(82, 168, 236, 0.8);
box-shadow: 4px 4px 6px rgba(0, 0, 0, 0.075);
outline: 0 none;
}
div.responsiveWhizzard div.layoutContainer div._row div._rowButtons{
position: absolute;
display:block;
width: 32px;
height: 100%;
top:0; right: -32px;
border: 1px solid #ccc;
background-color: #eee;
/* margin-right: 3.896956521738%; */
margin-right: 2.690217391%;
box-sizing: border-box;
-moz-box-sizing: border-box;
text-align: center;
}
div.responsiveWhizzard div.layoutContainer div._row div._rowButtons img{
cursor: pointer;
clear: both;
}
div.responsiveWhizzard div.layoutContainer div._row:after{
content: ".";
display: block;
clear: both;
visibility: hidden;
line-height: 0;
height: 0;
}
div.responsiveWhizzard div.layoutContainer div.section{
position: relative;
display: block;
min-height: 130px;
background-color: #aaa;
border: 1px solid #888;
margin:0; padding:0;
box-sizing: border-box;
-moz-box-sizing: border-box;
transition: border 0.2s linear 0s, background 0.2s linear 0s;
float:left;
margin-right: 1.43478260869%;
cursor: pointer;
-moz-user-select: none;
-khtml-user-select: none;
-webkit-user-select: none;
user-select: none;
}
div.responsiveWhizzard div.layoutContainer div.section:hover{
background-color: #0088CC;
border-color: #194071;
}
div.responsiveWhizzard div.layoutContainer div.section[data-span='1']{ width: 2.690217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='2']{ width: 6.815217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='3']{ width: 10.940217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='4']{ width: 15.065217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='5']{ width: 19.190217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='6']{ width: 23.315217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='7']{ width: 27.440217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='8']{ width: 31.565217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='9']{ width: 35.690217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='10']{ width: 39.815217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='11']{ width: 43.940217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='12']{ width: 48.065217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='13']{ width: 52.190217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='14']{ width: 56.315217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='15']{ width: 60.440217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='16']{ width: 64.565217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='17']{ width: 68.690217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='18']{ width: 72.815217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='19']{ width: 76.940217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='20']{ width: 81.065217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='21']{ width: 85.190217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='22']{ width: 89.315217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='23']{ width: 93.440217391%; }
div.responsiveWhizzard div.layoutContainer div.section[data-span='24']{ width: 97.565217391%; margin-right:0; }
div.responsiveWhizzard div.layoutContainer div.last{
margin-right: 0;
}
#m4jMain div.responsiveWhizzard div.layoutContainer div.section fieldset{
position: absolute;
display:block;
border-color: #000;
width: 100%; height: 100%;
margin:0; padding:4px;
min-height: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
border-color: #0088CC;
}
#m4jMain div.responsiveWhizzard div.layoutContainer div.section legend{
color: #004466;
font-style: italic;
box-sizing: border-box;
-moz-box-sizing: border-box;
background-color: transparent;
border: none;
margin: 0; padding: 0 2%;
line-height: 24px;
height: 24px;
margin-left: 2%;
}
#m4jMain div.responsiveWhizzard div.layoutContainer div.section:hover fieldset, #m4jMain div.responsiveWhizzard div.layoutContainer div.section:hover legend{
color: #fff;
border-color: #fff;
}
body {
counter-reset: section; /* Set the section counter to 0 */
}
div.responsiveWhizzard div.layoutContainer div.section span.count{
position: absolute;
display:block;
width: 20px; height: 20px;
-moz-border-radius: 20px;
-webkit-border-radius: 20px;
-khtml-border-radius: 20px;
border-radius: 20px;
border: 3px solid #444;
background-color: #fff;
line-height: 20px;
vertical-align: middle;
text-align: center;
font-size: 14px;
font-weight:normal;
color: #000;
left:-10px; top: -18px;
z-index: 10;
transition: border 0.2s linear 0s, background 0.2s linear 0s;
}
div.responsiveWhizzard div.layoutContainer div.section:hover span.count{
background-color: #ffff00;
border-color: #000;
}
div.responsiveWhizzard div.layoutContainer div.section span.count:before{
counter-increment:section;
content:counter(section) ;
}
div.responsiveWhizzard div.layoutContainer div.section span.ruler{
position: absolute;
top: -24px; right: 0;
width: 32px; height: 24px;
display:block;
font-size: 16px;
border: 1px solid #888;
margin:0; padding: 2px;
box-sizing: border-box;
-moz-box-sizing: border-box;
border-bottom: none;
-moz-border-radius-topleft: 20px;
-webkit-border-top-left-radius: 20px;
-khtml-border-top-left-radius: 20px;
border-top-left-radius: 20px;
-moz-border-radius-topright: 20px;
-webkit-border-top-right-radius: 20px;
-khtml-border-top-right-radius: 20px;
border-top-right-radius: 20px;
background-color: #aaa;
transition: border 0.2s linear 0s, background 0.2s linear 0s;
text-align: center;
-moz-user-select: none;
-khtml-user-select: none;
-webkit-user-select: none;
user-select: none;
cursor: col-resize;
}
div.responsiveWhizzard div.layoutContainer div.section:hover span.ruler{
background-color: #0088CC;
border-color: #194071;
}
div.responsiveWhizzard div.layoutContainer div.section:hover span.ruler:hover{
background-color: #00cc88;
border-color: #194071;
border-bottom: 1px solid #0088CC;
}
div.responsiveWhizzard div.layoutContainer div.section div.division{
display:block;
height: 100%; width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
margin-top: 24px;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=80)";
filter: alpha(opacity=80);
-moz-opacity: 0.8;
-khtml-opacity: 0.8;
opacity: 0.8;
}
div.responsiveWhizzard div.layoutContainer div.section fieldset div.division{
margin-top: 0;
}
div.responsiveWhizzard div.layoutContainer div.section div.division div{
display:block;
box-sizing: border-box;
-moz-box-sizing: border-box;
float:left;
margin:0; padding: 0;
color: #000;
padding: 4px 0;
text-align: center;
line-height: 50px;
height: 60px;
text-overflow:ellipsis;
overflow: hidden;
}
div.responsiveWhizzard div.layoutContainer div.section div.question{
background-color: #efefef;
}
div.responsiveWhizzard div.layoutContainer div.section div.field{
background-color: #ddd;
}
div.responsiveWhizzard div.layoutContainer div.section div.floating{
position: absolute;
top:24px; left:0;
display:block;
height: 81px; width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=80)";
filter: alpha(opacity=80);
-moz-opacity: 0.8;
-khtml-opacity: 0.8;
opacity: 0.8;
overflow: hidden;
text-overflow:ellipsis;
}
div.responsiveWhizzard div.layoutContainer div.section div.floating span{
display:inline-block;
width: 80px; height: 80px;
box-sizing: border-box;
-moz-box-sizing: border-box;
border: 2px solid #ddd;
margin-left: 10px;
}
div.responsiveWhizzard div.layoutContainer div.section div.arrowRight{
position: absolute;
display:block;
bottom:0; right:0;
height: 80px;
color: #888;
font-size: 60px;
transition: color 0.2s linear 0s, text-shadow 0.2s linear 0s;
text-shadow: 1px 0 0 #444, 0 -1px 0 #444, 0 1px 0 #444, -1px 0 0 #444;
}
div.responsiveWhizzard div.layoutContainer div.section:hover div.arrowRight{
color: #194678;
text-shadow: 1px 0 0 #fff, 0 -1px 0 #fff, 0 1px 0 #fff, -1px 0 0 #fff;
}
div.responsiveWhizzard div.layoutContainer div.section div.arrowDown{
position: absolute;
display:block;
top:0; right:-10px;
line-height:140px;
color: #888;
font-size: 120px;
transition: color 0.2s linear 0s, text-shadow 0.2s linear 0s;
text-shadow: 1px 0 0 #444, 0 -1px 0 #444, 0 1px 0 #444, -1px 0 0 #444;
overflow: hidden;
}
div.responsiveWhizzard div.layoutContainer div.section:hover div.arrowDown{
color: #194678;
text-shadow: 1px 0 0 #fff, 0 -1px 0 #fff, 0 1px 0 #fff, -1px 0 0 #fff;
}
div.responsiveWhizzard div.layoutContainer div.section div.arrowDown span {
display:block;
margin-top: -20px;
}
div.responsiveWhizzard div.layoutContainer div.section div.slotTitle{
position: absolute; display:block;
left: 10px; bottom: -10px;
height: 28px;
box-sizing: border-box;
-moz-box-sizing: border-box;
font-size: 14px;
max-width: 60%;
border: 1px solid #888;
background-color: #BFDAE8;
padding: 4px;
text-overflow:ellipsis;
overflow: hidden;
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
-khtml-border-radius: 5px;
border-radius: 5px;
}
div.responsiveWhizzard div.layoutContainer div.section div.countButton{
position: absolute; display:block;
left: 14px; bottom: 20px;
box-sizing: border-box;
-moz-box-sizing: border-box;
height: 20px;
text-align: center;
font-size: 12px;
border: none;
font-weight: bold;
background-color: #444;
padding: 4px;
line-height: 100%;
color: #ddd;
-moz-border-radius: 10px;
-webkit-border-radius: 10px;
-khtml-border-radius: 10px;
border-radius: 10px;
}
div.responsiveWhizzard div.layoutContainer div.section div.height, div.responsiveWhizzard div.layoutContainer div.section div.minHeight{
position: absolute; display:none;
left:0; width: 100%;
box-sizing: border-box;
-moz-box-sizing: border-box;
height: 1px;
border-top: 1px dashed #f00;
margin-top: -1px;
}
div.responsiveWhizzard div.layoutContainer div.section div.minHeight{
border-color: #4BBA49;
}
ul#spanSelect{
display:inline-block;
list-style: none;
margin:0; padding:0;
margin-bottom: 10px;
text-shadow: 0 -1px #444;
}
ul#spanSelect span{
display:inline-block;
margin:0; padding: 4px;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
-khtml-border-radius: 3px;
border-radius: 3px;
background-color: #BB4C42;
color: #fff;
font-weight: bold;
cursor: default;
}
ul#spanSelect ul{
position: absolute;
display:block;
background-color: #BB4C42;
color: #fff;
list-style: none;
margin:0; padding:0;
margin-top: -4px;
-moz-border-radius: 3x;
-webkit-border-radius: 3px;
-khtml-border-radius: 3px;
border-radius: 3px;
z-index: 100;
left: -9999em;
}
ul#spanSelect:hover ul{
left: auto;
}
ul#spanSelect ul li{
display:block;
margin:0; padding: 4px;
cursor: pointer;
}
ul#spanSelect ul li:hover{
text-decoration: underline;
}
div#hidePane{
position: fixed;
display:block;
margin:0; padding:0;
width:100%; height: 100%;
top:0; left:-9999em;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=80)";
filter: alpha(opacity=80);
-moz-opacity: 0.8;
-khtml-opacity: 0.8;
opacity: 0.8;
background-color: #fff;
z-index: 10000;
}
div#editAnimPane{
position: fixed;
display:block;
margin:0; padding:0;
top:0; left:-9999em;
background-color: #0088cc;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=0)";
filter: alpha(opacity=0);
-moz-opacity: 0;
-khtml-opacity: 0;
opacity: 0;
z-index: 10001;
}
div#editPane{
position: fixed;
display:block;
margin:0; padding:10px;
width:800px; height: 600px;
top:0; left:-9999em;
background-color: #0088cc;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=100)";
filter: alpha(opacity=100);
-moz-opacity: 1;
-khtml-opacity: 1;
opacity: 1;
-moz-box-shadow: 0 0 5px #666;
-webkit-box-shadow: 0 0 5px#666;
box-shadow: 0 0 5px #666;
box-sizing: border-box;
-moz-box-sizing: border-box;
z-index: 10002;
color: #fff;
}
div#editPane div.editRow{
position: relative;
display:block;
width:100%;
margin-bottom: 15px;
}
div#editPane div.editRow div.hide{
position: absolute; display:block;
left:0; top:0;
width: 100%;
height: 100%;
background-color: #fff;
-ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=80)";
filter: alpha(opacity=80);
-moz-opacity: 0.8;
-khtml-opacity: 0.8;
opacity: 0.8;
margin:0; padding:0; border: none;
}
div#editPane div.editRow:after{
content: ".";
display: block;
clear: both;
visibility: hidden;
line-height: 0;
height: 0;
}
div#editPane h1{
font-size: 24px;
font-weight: bold;
margin:0; padding:0;
margin-bottom: 5px;
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
}
div#editPane label{
display:inline-block;
font-size: 12px;
height: 20px;
line-height: 20px;
margin: 0 3px 0 0;
padding: 2px;
cursor: default;
min-width: 250px;
}
div#editPane label img{
display:inline;
line-height: 20px;
margin-right: 10px;
}
div#editPane div.special , div#editPane div.wrap {
display: inline-block;
line-height: 20px;
}
div#editPane div.special div{
margin-bottom: -3px;
}
div#editPane textarea, div#editPane input{
display:inline-block;
background-color: #FFFFFF;
border: 1px solid #CCCCCC;
box-shadow: 0 1px 1px rgba(0, 0, 0, 0.075) inset;
transition: border 0.2s linear 0s, box-shadow 0.2s linear 0s;
font-size: 12px;
height: 20px;
line-height: 20px;
margin: 0 3px 0 0;
padding: 2px;
}
div#editPane textarea:focus, div#editPane input:focus{
border-color: rgba(82, 168, 236, 0.8);
box-shadow: 0 1px 1px rgba(0, 0, 0, 0.075) inset, 0 0 8px rgba(82, 168, 236, 0.6);
outline: 0 none;
}
div#editPane div.content{
-moz-border-radius: 10px;
-webkit-border-radius: 10px;
-khtml-border-radius: 10px;
border-radius: 10px;
display: block;
width: 100%;
background-color: #fff;
color: #000;
box-sizing: border-box;
-moz-box-sizing: border-box;
padding: 10px;
min-height: 500px;
}
.m4jToggleArrangement {
background: url("./../images/toggle_layout_align.png") no-repeat scroll 0 0 rgba(0, 0, 0, 0);
border: medium none;
cursor: pointer;
display: block;
float: left;
height: 17px;
margin: 2px 0 0;
padding: 0;
text-align: left;
vertical-align: top;
width: 77px;
}
#right.m4jToggleArrangement{
background-position: 0 -17px;
}
div.setValues{
position: absolute;
bottom: 34px; right: 20px;
display:inline-block;
text-shadow: 0 -1px #00692C;
margin:0; padding: 4px;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
-khtml-border-radius: 3px;
border-radius: 3px;
background-color: #0088cc;
color: #fff;
font-size: 16px;
font-weight: bold;
cursor: pointer;
transition: background 0.2s linear 0s;
}
div.setValues:hover{
background-color: #00aa66;
}
| gpl-2.0 |
mcberg2016/graal-core2 | graal/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/DerivedOffsetInductionVariable.java | 4905 | /*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.loop;
import static org.graalvm.compiler.loop.MathUtil.add;
import static org.graalvm.compiler.loop.MathUtil.sub;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.AddNode;
import org.graalvm.compiler.nodes.calc.BinaryArithmeticNode;
import org.graalvm.compiler.nodes.calc.IntegerConvertNode;
import org.graalvm.compiler.nodes.calc.NegateNode;
import org.graalvm.compiler.nodes.calc.SubNode;
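/**
 * A derived induction variable of the form {@code base + offset}, {@code base - offset} or
 * {@code offset - base}, where {@code base} is another induction variable of the same loop and
 * {@code offset} is expected to be loop invariant (see the {@code op} helpers below).
 */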
public class DerivedOffsetInductionVariable extends DerivedInductionVariable {
private final ValueNode offset;
private final BinaryArithmeticNode<?> value;
public DerivedOffsetInductionVariable(LoopEx loop, InductionVariable base, ValueNode offset, BinaryArithmeticNode<?> value) {
super(loop, base);
this.offset = offset;
this.value = value;
}
public ValueNode getOffset() {
return offset;
}
@Override
public Direction direction() {
return base.direction();
}
@Override
public ValueNode valueNode() {
return value;
}
@Override
public boolean isConstantInit() {
return offset.isConstant() && base.isConstantInit();
}
@Override
public boolean isConstantStride() {
return base.isConstantStride();
}
@Override
public long constantInit() {
return op(base.constantInit(), offset.asJavaConstant().asLong());
}
@Override
public long constantStride() {
if (value instanceof SubNode && base.valueNode() == value.getY()) {
return -base.constantStride();
}
return base.constantStride();
}
@Override
public ValueNode initNode() {
return op(base.initNode(), offset);
}
@Override
public ValueNode strideNode() {
if (value instanceof SubNode && base.valueNode() == value.getY()) {
return graph().addOrUniqueWithInputs(NegateNode.create(base.strideNode()));
}
return base.strideNode();
}
@Override
public ValueNode extremumNode(boolean assumePositiveTripCount, Stamp stamp) {
return op(base.extremumNode(assumePositiveTripCount, stamp), IntegerConvertNode.convert(offset, stamp, graph()));
}
@Override
public ValueNode exitValueNode() {
return op(base.exitValueNode(), offset);
}
@Override
public boolean isConstantExtremum() {
return offset.isConstant() && base.isConstantExtremum();
}
@Override
public long constantExtremum() {
return op(base.constantExtremum(), offset.asJavaConstant().asLong());
}
private long op(long b, long o) {
if (value instanceof AddNode) {
return b + o;
}
if (value instanceof SubNode) {
if (base.valueNode() == value.getX()) {
return b - o;
} else {
assert base.valueNode() == value.getY();
return o - b;
}
}
throw GraalError.shouldNotReachHere();
}
private ValueNode op(ValueNode b, ValueNode o) {
if (value instanceof AddNode) {
return add(graph(), b, o);
}
if (value instanceof SubNode) {
if (base.valueNode() == value.getX()) {
return sub(graph(), b, o);
} else {
assert base.valueNode() == value.getY();
return sub(graph(), o, b);
}
}
throw GraalError.shouldNotReachHere();
}
@Override
public void deleteUnusedNodes() {
}
@Override
public String toString() {
return String.format("DerivedOffsetInductionVariable base (%s) %s %s", base, value.getNodeClass().shortName(), offset);
}
}
| gpl-2.0 |
torque/mpv | player/javascript/defaults.js | 18251 | "use strict";
(function main_default_js(g) {
// - g is the global object.
// - User callbacks called without 'this', global only if callee is non-strict.
// - The names of function expressions are not required, but are used in stack
// traces. We name them where useful to show up (fname:#line always shows).
mp.msg = { log: mp.log };
mp.msg.verbose = mp.log.bind(null, "v");
var levels = ["fatal", "error", "warn", "info", "debug"];
levels.forEach(function(l) { mp.msg[l] = mp.log.bind(null, l) });
// same as {} but without inherited stuff, e.g. o["toString"] doesn't exist.
// used where we try to fetch items by keys which we don't absolutely trust.
function new_cache() {
return Object.create(null, {});
}
/**********************************************************************
* event handlers, property observers, client messages, hooks
*********************************************************************/
var ehandlers = new_cache() // items of event-name: array of {maybe cb: fn}
mp.register_event = function(name, fn) {
if (!ehandlers[name])
ehandlers[name] = [];
ehandlers[name] = ehandlers[name].concat([{cb: fn}]); // replaces the arr
return mp._request_event(name, true);
}
mp.unregister_event = function(fn) {
for (var name in ehandlers) {
ehandlers[name] = ehandlers[name].filter(function(h) {
if (h.cb != fn)
return true;
delete h.cb; // dispatch could have a ref to h
}); // replacing, not mutating the array
if (!ehandlers[name].length) {
delete ehandlers[name];
mp._request_event(name, false);
}
}
}
// call only pre-registered handlers, but not ones which got unregistered
function dispatch_event(e) {
var handlers = ehandlers[e.event];
if (handlers) {
for (var len = handlers.length, i = 0; i < len; i++) {
var cb = handlers[i].cb; // 'handlers' won't mutate, but unregister
if (cb) // could remove cb from some items
cb(e);
}
}
}
// ----- property observers -----
var next_oid = 1,
observers = new_cache(); // items of id: fn
mp.observe_property = function(name, format, fn) {
var id = next_oid++;
observers[id] = fn;
return mp._observe_property(id, name, format || undefined); // allow null
}
mp.unobserve_property = function(fn) {
for (var id in observers) {
if (observers[id] == fn) {
delete observers[id];
mp._unobserve_property(id);
}
}
}
function notify_observer(e) {
var cb = observers[e.id];
if (cb)
cb(e.name, e.data);
}
// ----- Client messages -----
var messages = new_cache(); // items of name: fn
// overrides name. no libmpv API to reg/unreg specific messages.
mp.register_script_message = function(name, fn) {
messages[name] = fn;
}
mp.unregister_script_message = function(name) {
delete messages[name];
}
function dispatch_message(ev) {
var cb = ev.args.length ? messages[ev.args[0]] : false;
if (cb)
cb.apply(null, ev.args.slice(1));
}
// ----- hooks -----
var next_hid = 1,
hooks = new_cache(); // items of id: fn
function hook_run(id, cont) {
var cb = hooks[id];
if (cb)
cb();
mp.commandv("hook-ack", cont);
}
mp.add_hook = function add_hook(name, pri, fn) {
if (next_hid == 1) // doesn't really matter if we do it once or always
mp.register_script_message("hook_run", hook_run);
var id = next_hid++;
hooks[id] = fn;
return mp.commandv("hook-add", name, id, pri);
}
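// Illustrative usage sketch (never called). The hook name "on_load" and the
// priority 50 follow mpv's scripting documentation, but are assumptions as far
// as this file is concerned.
function example_add_hook_usage() {
mp.add_hook("on_load", 50, function() {
mp.msg.info("a file is about to be loaded");
});
}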
/**********************************************************************
* key bindings
*********************************************************************/
// binds: items of (binding) name which are objects of:
// {cb: fn, forced: bool, maybe input: str, repeatable: bool, complex: bool}
var binds = new_cache();
function dispatch_key_binding(name, state) {
var cb = binds[name] ? binds[name].cb : false;
if (cb) // "script-binding [<script_name>/]<name>" command was invoked
cb(state);
}
function update_input_sections() {
var def = [], forced = [];
for (var n in binds) // Array.join() will later skip undefined .input
(binds[n].forced ? forced : def).push(binds[n].input);
var sect = "input_" + mp.script_name;
mp.commandv("define-section", sect, def.join("\n"), "default");
mp.commandv("enable-section", sect, "allow-hide-cursor+allow-vo-dragging");
sect = "input_forced_" + mp.script_name;
mp.commandv("define-section", sect, forced.join("\n"), "force");
mp.commandv("enable-section", sect, "allow-hide-cursor+allow-vo-dragging");
}
// name/opts may be omitted. opts: object with optional bool members: repeatable,
// complex, forced, or a string str which is evaluated as object {str: true}.
var next_bid = 1;
function add_binding(forced, key, name, fn, opts) {
if (typeof name == "function") { // as if "name" is not part of the args
opts = fn;
fn = name;
name = "__keybinding" + next_bid++; // new unique binding name
}
var key_data = {forced: forced};
switch (typeof opts) { // merge opts into key_data
case "string": key_data[opts] = true; break;
case "object": for (var o in opts) key_data[o] = opts[o];
}
if (key_data.complex) {
mp.register_script_message(name, function msg_cb() {
fn({event: "press", is_mouse: false});
});
var KEY_STATES = { u: "up", d: "down", r: "repeat", p: "press" };
key_data.cb = function key_cb(state) {
fn({
event: KEY_STATES[state[0]] || "unknown",
is_mouse: state[1] == "m"
});
}
} else {
mp.register_script_message(name, fn);
key_data.cb = function key_cb(state) {
// Emulate the semantics at input.c: mouse emits on up, kb on down.
// Also, key repeat triggers the binding again.
var e = state[0],
emit = (state[1] == "m") ? (e == "u") : (e == "d");
if (emit || e == "p" || e == "r" && key_data.repeatable)
fn();
}
}
if (key)
key_data.input = key + " script-binding " + mp.script_name + "/" + name;
binds[name] = key_data; // used by user and/or our (key) script-binding
update_input_sections();
}
mp.add_key_binding = add_binding.bind(null, false);
mp.add_forced_key_binding = add_binding.bind(null, true);
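// Illustrative usage sketch (never called). The key names and binding names
// below are arbitrary examples; a "complex" handler receives the {event,
// is_mouse} object built in key_cb above.
function example_key_binding_usage() {
mp.add_key_binding("ctrl+y", "example-simple", function() {
mp.osd_message("ctrl+y pressed");
}, {repeatable: true});
mp.add_forced_key_binding("F2", "example-complex", function(e) {
print("event: " + e.event + ", mouse: " + e.is_mouse);
}, {complex: true});
}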
mp.remove_key_binding = function(name) {
mp.unregister_script_message(name);
delete binds[name];
update_input_sections();
}
/**********************************************************************
Timers: compatible HTML5 WindowTimers - set/clear Timeout/Interval
- Spec: https://www.w3.org/TR/html5/webappapis.html#timers
- Guaranteed to callback a-sync to [re-]insertion (event-loop wise).
- Guaranteed to callback by expiration order, or, if equal, by insertion order.
- Not guaranteed schedule accuracy, though intervals should have good average.
*********************************************************************/
// pending 'timers' ordered by expiration: latest at index 0 (top fires first).
// Earlier timers are quicker to handle - just push/pop or fewer items to shift.
var next_tid = 1,
timers = [], // while in process_timers, just insertion-ordered (push)
tset_is_push = false, // signal set_timer that we're in process_timers
tcanceled = false, // or object of items timer-id: true
now = mp.get_time_ms; // just an alias
function insert_sorted(arr, t) {
for (var i = arr.length - 1; i >= 0 && t.when >= arr[i].when; i--)
arr[i + 1] = arr[i]; // move up timers which fire earlier than t
arr[i + 1] = t; // i is -1 or fires later than t
}
// args (is "arguments"): fn_or_str [,duration [,user_arg1 [, user_arg2 ...]]]
function set_timer(repeat, args) {
var fos = args[0],
duration = Math.max(0, (args[1] || 0)), // minimum and default are 0
t = {
id: next_tid++,
when: now() + duration,
interval: repeat ? duration : -1,
callback: (typeof fos == "function") ? fos : Function(fos),
args: (args.length < 3) ? false : [].slice.call(args, 2),
};
if (tset_is_push) {
timers.push(t);
} else {
insert_sorted(timers, t);
}
return t.id;
}
g.setTimeout = function setTimeout() { return set_timer(false, arguments) };
g.setInterval = function setInterval() { return set_timer(true, arguments) };
g.clearTimeout = g.clearInterval = function(id) {
if (id < next_tid) { // must ignore if not active timer id.
if (!tcanceled)
tcanceled = {};
tcanceled[id] = true;
}
}
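// Illustrative usage sketch (never called): these behave like the HTML5 timers
// described above - setInterval returns an id which clearInterval cancels, and
// extra arguments are passed through to the callback.
function example_timers_usage() {
var n = 0, id = setInterval(function() {
if (++n >= 3)
clearInterval(id);
}, 1000);
setTimeout(print, 500, "fires once after ~500 ms");
}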
// arr: ordered timers array. ret: -1: no timers, 0: due, positive: ms to wait
function peek_wait(arr) {
return arr.length ? Math.max(0, arr[arr.length - 1].when - now()) : -1;
}
// Callback all due non-canceled timers which were inserted before calling us.
// Returns wait in ms till the next timer (possibly 0), or -1 if nothing pends.
function process_timers() {
var wait = peek_wait(timers);
if (wait != 0)
return wait;
var actives = timers; // only process those already inserted by now
timers = []; // we'll handle added new timers at the end of processing.
tset_is_push = true; // signal set_timer to just push-insert
do {
var t = actives.pop();
if (tcanceled && tcanceled[t.id])
continue;
if (t.args) {
t.callback.apply(null, t.args);
} else {
(0, t.callback)(); // faster, nicer stack trace than t.cb.call()
}
if (t.interval >= 0) {
// allow 20 ms delay/clock-resolution/gc before we skip and reset
t.when = Math.max(now() - 20, t.when + t.interval);
timers.push(t); // insertion order only
}
} while (peek_wait(actives) == 0);
// new 'timers' are insertion-ordered. remains of actives are fully ordered
timers.forEach(function(t) { insert_sorted(actives, t) });
timers = actives; // now we're fully ordered again, and with all timers
tset_is_push = false;
if (tcanceled) {
timers = timers.filter(function(t) { return !tcanceled[t.id] });
tcanceled = false;
}
return peek_wait(timers);
}
/**********************************************************************
CommonJS module/require
Spec: http://wiki.commonjs.org/wiki/Modules/1.1.1
- All the mandatory requirements are implemented, all the unit tests pass.
- The implementation makes the following exception:
- Allows the chars [~@:\\] in module id for meta-dir/builtin/dos-drive/UNC.
Implementation choices beyond the specification:
- A module may assign to module.exports (rather than only to exports).
- A module's 'this' is the global object, also if it sets strict mode.
- No 'global'/'self'. Users can do "this.global = this;" before require(..)
- A module has "privacy of its top scope", runs in its own function context.
- No id identity with symlinks - a valid choice which others make too.
- require("X") always maps to "X.js" -> require("foo.js") is file "foo.js.js".
- Global modules search paths are 'scripts/modules.js/' in mpv config dirs.
- A main script could e.g. require("./abc") to load a non-global module.
- Module id supports mpv path enhancements, e.g. ~/foo, ~~/bar, ~~desktop/baz
*********************************************************************/
// Internal meta top-dirs. Users should not rely on these names.
var MODULES_META = "~~modules",
SCRIPTDIR_META = "~~scriptdir", // relative script path -> meta absolute id
main_script = mp.utils.split_path(mp.script_file); // -> [ path, file ]
function resolve_module_file(id) {
var sep = id.indexOf("/"),
base = id.substring(0, sep),
rest = id.substring(sep + 1) + ".js";
if (base == SCRIPTDIR_META)
return mp.utils.join_path(main_script[0], rest);
if (base == MODULES_META) {
var path = mp.find_config_file("scripts/modules.js/" + rest);
if (!path)
throw(Error("Cannot find module file '" + rest + "'"));
return path;
}
return id + ".js";
}
// Delimiter '/', remove redundancies, prefix with modules meta-root if needed.
// E.g. c:\x -> c:/x, or ./x//y/../z -> ./x/z, or utils/x -> ~~modules/utils/x .
function canonicalize(id) {
var path = id.replace(/\\/g,"/").split("/"),
t = path[0],
base = [];
// if not strictly relative then must be top-level. figure out base/rest
if (t != "." && t != "..") {
// global module if it's not fs-root/home/dos-drive/builtin/meta-dir
if (!(t == "" || t == "~" || t[1] == ":" || t == "@" || t.match(/^~~/)))
path.unshift(MODULES_META); // add an explicit modules meta-root
if (id.match(/^\\\\/)) // simple UNC handling, preserve leading \\srv
path = ["\\\\" + path[2]].concat(path.slice(3)); // [ \\srv, shr..]
if (t[1] == ":" && t.length > 2) { // path: [ "c:relative", "path" ]
path[0] = t.substring(2);
path.unshift(t[0] + ":."); // -> [ "c:.", "relative", "path" ]
}
base = [path.shift()];
}
// path is now logically relative. base, if not empty, is its [meta] root.
// normalize the relative part - always id-based (spec Module Id, 1.3.6).
var cr = []; // canonicalized relative
for (var i = 0; i < path.length; i++) {
if (path[i] == "." || path[i] == "")
continue;
if (path[i] == ".." && cr.length && cr[cr.length - 1] != "..") {
cr.pop();
continue;
}
cr.push(path[i]);
}
if (!base.length && cr[0] != "..")
base = ["."]; // relative and not ../<stuff> so must start with ./
return base.concat(cr).join("/");
}
function resolve_module_id(base_id, new_id) {
new_id = canonicalize(new_id);
if (!new_id.match(/^\.\/|^\.\.\//)) // doesn't start with ./ or ../
return new_id; // not relative, we don't care about base_id
var combined = mp.utils.join_path(mp.utils.split_path(base_id)[0], new_id);
return canonicalize(combined);
}
var req_cache = new_cache(); // global for all instances of require
// ret: a require function instance which uses base_id to resolve relative id's
function new_require(base_id) {
return function require(id) {
id = resolve_module_id(base_id, id); // id is now top-level
if (req_cache[id])
return req_cache[id].exports;
var new_module = {id: id, exports: {}};
req_cache[id] = new_module;
try {
var filename = resolve_module_file(id);
// we need dedicated free vars + filename in traces + allow strict
var str = "mp._req = function(require, exports, module) {" +
mp.utils.read_file(filename) +
"\n;}";
mp.utils.compile_js(filename, str)(); // only runs the assignment
var tmp = mp._req; // we have mp._req, or else we'd have thrown
delete mp._req;
tmp.call(g, new_require(id), new_module.exports, new_module);
} catch (e) {
delete req_cache[id];
throw(e);
}
return new_module.exports;
};
}
g.require = new_require(SCRIPTDIR_META + "/" + main_script[1]);
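// Illustrative resolution examples (module names are made up): from the main
// script, require("./foo") loads "foo.js" next to the script file, while
// require("utils/bar") is treated as a global module and searched for as
// "scripts/modules.js/utils/bar.js" in mpv's config directories.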
/**********************************************************************
* various
*********************************************************************/
g.print = mp.msg.info; // convenient alias
mp.get_script_name = function() { return mp.script_name };
mp.get_script_file = function() { return mp.script_file };
mp.get_time = function() { return mp.get_time_ms() / 1000 };
mp.utils.getcwd = function() { return mp.get_property("working-directory") };
mp.dispatch_event = dispatch_event;
mp.process_timers = process_timers;
mp.get_opt = function(key, def) {
var v = mp.get_property_native("options/script-opts")[key];
return (typeof v != "undefined") ? v : def;
}
mp.osd_message = function osd_message(text, duration) {
mp.commandv("show_text", text, Math.round(1000 * (duration || -1)));
}
// ----- dump: like print, but expands objects/arrays recursively -----
function replacer(k, v) {
var t = typeof v;
if (t == "function" || t == "undefined")
return "<" + t + ">";
if (Array.isArray(this) && t == "object" && v !== null) { // "safe" mode
if (this.indexOf(v) >= 0)
return "<VISITED>";
this.push(v);
}
return v;
}
function obj2str(v) {
try { // can process objects more than once, but throws on cycles
return JSON.stringify(v, replacer, 2);
} catch (e) { // simple safe: exclude visited objects, even if not cyclic
return JSON.stringify(v, replacer.bind([]), 2);
}
}
g.dump = function dump() {
var toprint = [];
for (var i = 0; i < arguments.length; i++) {
var v = arguments[i];
toprint.push((typeof v == "object") ? obj2str(v) : replacer(0, v));
}
print.apply(null, toprint);
}
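// For instance (illustrative), dump({a: 1, f: function(){}}) prints the object
// expanded over multiple lines with the function value shown as "<function>",
// and cyclic references - when present - are replaced with "<VISITED>".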
/**********************************************************************
* main listeners and event loop
*********************************************************************/
mp.keep_running = true;
g.exit = function() { mp.keep_running = false }; // user-facing too
mp.register_event("shutdown", g.exit);
mp.register_event("property-change", notify_observer);
mp.register_event("client-message", dispatch_message);
mp.register_script_message("key-binding", dispatch_key_binding);
g.mp_event_loop = function mp_event_loop() {
var wait = 0; // seconds
do { // dispatch events as long as they arrive, then do the timers
var e = mp.wait_event(wait);
if (e.event != "none") {
dispatch_event(e);
wait = 0; // poll the next one
} else {
wait = process_timers() / 1000;
}
} while (mp.keep_running);
};
})(this)
| gpl-2.0 |
Sean3Don/inkscape | src/2geom/point.h | 13205 | /**
* \file
* \brief Cartesian point / 2D vector and related operations
*//*
* Authors:
* Michael G. Sloan <[email protected]>
* Nathan Hurst <[email protected]>
* Krzysztof Kosiński <[email protected]>
*
* Copyright (C) 2006-2009 Authors
*
* This library is free software; you can redistribute it and/or
* modify it either under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation
* (the "LGPL") or, at your option, under the terms of the Mozilla
* Public License Version 1.1 (the "MPL"). If you do not alter this
* notice, a recipient may use your version of this file under either
* the MPL or the LGPL.
*
* You should have received a copy of the LGPL along with this library
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* You should have received a copy of the MPL along with this library
* in the file COPYING-MPL-1.1
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
* the specific language governing rights and limitations.
*/
#ifndef SEEN_Geom_POINT_H
#define SEEN_Geom_POINT_H
#include "config.h"
#include <iostream>
#include <iterator>
#include <boost/operators.hpp>
#include <2geom/forward.h>
#include <2geom/coord.h>
#include <2geom/int-point.h>
#include <2geom/math-utils.h>
#include <2geom/utils.h>
namespace Geom {
class Point
: boost::additive< Point
, boost::totally_ordered< Point
, boost::multiplicative< Point, Coord
, MultipliableNoncommutative< Point, Affine
, MultipliableNoncommutative< Point, Translate
, MultipliableNoncommutative< Point, Rotate
, MultipliableNoncommutative< Point, Scale
, MultipliableNoncommutative< Point, HShear
, MultipliableNoncommutative< Point, VShear
, MultipliableNoncommutative< Point, Zoom
> > > > > > > > > > // this uses chaining so it looks weird, but works
{
Coord _pt[2];
public:
/// @name Create points
/// @{
/** Construct a point on the origin. */
Point()
{ _pt[X] = _pt[Y] = 0; }
/** Construct a point from its coordinates. */
Point(Coord x, Coord y) {
_pt[X] = x; _pt[Y] = y;
}
/** Construct from integer point. */
Point(IntPoint const &p) {
_pt[X] = p[X];
_pt[Y] = p[Y];
}
Point(Point const &p) {
for (unsigned i = 0; i < 2; ++i)
_pt[i] = p._pt[i];
}
Point &operator=(Point const &p) {
for (unsigned i = 0; i < 2; ++i)
_pt[i] = p._pt[i];
return *this;
}
/** @brief Construct a point from its polar coordinates.
* The angle is specified in radians, in the mathematical convention (increasing
* counter-clockwise from +X). */
static Point polar(Coord angle, Coord radius) {
Point ret(polar(angle));
ret *= radius;
return ret;
}
/** @brief Construct an unit vector from its angle.
* The angle is specified in radians, in the mathematical convention (increasing
* counter-clockwise from +X). */
static Point polar(Coord angle) {
Point ret;
sincos(angle, ret[Y], ret[X]);
return ret;
}
/// @}
/// @name Access the coordinates of a point
/// @{
Coord operator[](unsigned i) const { return _pt[i]; }
Coord &operator[](unsigned i) { return _pt[i]; }
Coord operator[](Dim2 d) const throw() { return _pt[d]; }
Coord &operator[](Dim2 d) throw() { return _pt[d]; }
Coord x() const throw() { return _pt[X]; }
Coord &x() throw() { return _pt[X]; }
Coord y() const throw() { return _pt[Y]; }
Coord &y() throw() { return _pt[Y]; }
/// @}
/// @name Vector operations
/// @{
/** @brief Compute the distance from origin.
* @return Length of the vector from origin to this point */
Coord length() const { return hypot(_pt[0], _pt[1]); }
void normalize();
/** @brief Return a point like this point but rotated -90 degrees.
* If the y axis grows downwards and the x axis grows to the
* right, then this is 90 degrees counter-clockwise. */
Point ccw() const {
return Point(_pt[Y], -_pt[X]);
}
/** @brief Return a point like this point but rotated +90 degrees.
* If the y axis grows downwards and the x axis grows to the
* right, then this is 90 degrees clockwise. */
Point cw() const {
return Point(-_pt[Y], _pt[X]);
}
/// @}
/// @name Vector-like arithmetic operations
/// @{
Point operator-() const {
return Point(-_pt[X], -_pt[Y]);
}
Point &operator+=(Point const &o) {
for ( unsigned i = 0 ; i < 2 ; ++i ) {
_pt[i] += o._pt[i];
}
return *this;
}
Point &operator-=(Point const &o) {
for ( unsigned i = 0 ; i < 2 ; ++i ) {
_pt[i] -= o._pt[i];
}
return *this;
}
Point &operator*=(Coord s) {
for ( unsigned i = 0 ; i < 2 ; ++i ) _pt[i] *= s;
return *this;
}
Point &operator/=(Coord s) {
//TODO: s == 0?
for ( unsigned i = 0 ; i < 2 ; ++i ) _pt[i] /= s;
return *this;
}
/// @}
/// @name Affine transformations
/// @{
Point &operator*=(Affine const &m);
// implemented in transforms.cpp
Point &operator*=(Translate const &t);
Point &operator*=(Scale const &s);
Point &operator*=(Rotate const &r);
Point &operator*=(HShear const &s);
Point &operator*=(VShear const &s);
Point &operator*=(Zoom const &z);
/// @}
/// @name Conversion to integer points
/// @{
/** @brief Round to nearest integer coordinates. */
IntPoint round() const {
IntPoint ret(::round(_pt[X]), ::round(_pt[Y]));
return ret;
}
/** @brief Round coordinates downwards. */
IntPoint floor() const {
IntPoint ret(::floor(_pt[X]), ::floor(_pt[Y]));
return ret;
}
/** @brief Round coordinates upwards. */
IntPoint ceil() const {
IntPoint ret(::ceil(_pt[X]), ::ceil(_pt[Y]));
return ret;
}
/// @}
/// @name Various utilities
/// @{
/** @brief Check whether both coordinates are finite. */
bool isFinite() const {
for ( unsigned i = 0 ; i < 2 ; ++i ) {
if(!IS_FINITE(_pt[i])) return false;
}
return true;
}
/** @brief Check whether both coordinates are zero. */
bool isZero() const {
return _pt[X] == 0 && _pt[Y] == 0;
}
/** @brief Check whether the length of the vector is close to 1. */
bool isNormalized(Coord eps=EPSILON) const {
return are_near(length(), 1.0, eps);
}
/** @brief Equality operator.
* This tests for exact identity (as opposed to are_near()). Note that due to numerical
* errors, this test might return false even if the points should be identical. */
bool operator==(const Point &in_pnt) const {
return (_pt[X] == in_pnt[X]) && (_pt[Y] == in_pnt[Y]);
}
/** @brief Lexicographical ordering for points.
* Y coordinate is regarded as more significant. When sorting according to this
* ordering, the points will be sorted according to the Y coordinate, and within
* points with the same Y coordinate according to the X coordinate. */
bool operator<(const Point &p) const {
return _pt[Y] < p[Y] || (_pt[Y] == p[Y] && _pt[X] < p[X]);
}
/// @}
/** @brief Lexicographical ordering functor. */
template <Dim2 d> struct LexOrder;
/** @brief Lexicographical ordering functor with runtime dimension. */
class LexOrderRt {
public:
LexOrderRt(Dim2 d) : dim(d) {}
inline bool operator()(Point const &a, Point const &b);
private:
Dim2 dim;
};
friend inline std::ostream &operator<< (std::ostream &out_file, const Geom::Point &in_pnt);
};
/** @brief Output operator for points.
* Prints out the coordinates.
* @relates Point */
inline std::ostream &operator<< (std::ostream &out_file, const Geom::Point &in_pnt) {
out_file << "X: " << in_pnt[X] << " Y: " << in_pnt[Y];
return out_file;
}
template<> struct Point::LexOrder<X> {
bool operator()(Point const &a, Point const &b) {
return a[X] < b[X] || (a[X] == b[X] && a[Y] < b[Y]);
}
};
template<> struct Point::LexOrder<Y> {
bool operator()(Point const &a, Point const &b) {
return a[Y] < b[Y] || (a[Y] == b[Y] && a[X] < b[X]);
}
};
inline bool Point::LexOrderRt::operator()(Point const &a, Point const &b) {
return dim ? Point::LexOrder<Y>()(a, b) : Point::LexOrder<X>()(a, b);
}
/** @brief Compute the second (Euclidean) norm of @a p.
* This corresponds to the length of @a p. The result will not overflow even if
* \f$p_X^2 + p_Y^2\f$ is larger that the maximum value that can be stored
* in a <code>double</code>.
* @return \f$\sqrt{p_X^2 + p_Y^2}\f$
* @relates Point */
inline Coord L2(Point const &p)
{
return p.length();
}
/** @brief Compute the square of the Euclidean norm of @a p.
* Warning: this can overflow where L2 won't.
* @return \f$p_X^2 + p_Y^2\f$
* @relates Point */
inline Coord L2sq(Point const &p)
{
return p[0]*p[0] + p[1]*p[1];
}
//IMPL: NearConcept
/** @brief Nearness predicate for points.
* True if neither coordinate of @a a is further than @a eps from the corresponding
* coordinate of @a b.
* @relates Point */
inline bool are_near(Point const &a, Point const &b, double const eps=EPSILON)
{
return ( are_near(a[X],b[X],eps) && are_near(a[Y],b[Y],eps) );
}
/** @brief Return a point halfway between the specified ones.
* @relates Point */
inline Point middle_point(Point const& P1, Point const& P2)
{
return (P1 + P2) / 2;
}
/** @brief Returns p * Geom::rotate_degrees(90), but more efficient.
*
* Angle direction in 2Geom: If you use the traditional mathematics convention that y
* increases upwards, then positive angles are anticlockwise as per the mathematics convention. If
* you take the common non-mathematical convention that y increases downwards, then positive angles
* are clockwise, as is common outside of mathematics.
*
* There is no function to rotate by -90 degrees: use -rot90(p) instead.
* @relates Point */
inline Point rot90(Point const &p)
{
return Point(-p[Y], p[X]);
}
/** @brief Linear interpolation between two points.
* @param t Time value
* @param a First point
* @param b Second point
* @return Point on a line between a and b. The ratio of its distance from a
* and the distance between a and b will be equal to t.
* @relates Point */
inline Point lerp(double const t, Point const &a, Point const &b)
{
return (a * (1 - t) + b * t);
}
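/* Example (illustrative): lerp(0.25, Point(0, 0), Point(4, 8)) yields (1, 2),
 * i.e. the point a quarter of the way from the first point to the second. */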
/** @brief Compute the dot product of a and b.
* Dot product can be interpreted as a measure of how parallel the vectors are.
* For perpendicular vectors, it is zero. For parallel ones, its absolute value is highest,
* and the sign depends on whether they point in the same direction (+) or opposite ones (-).
* @return \f$a \cdot b = a_X b_X + a_Y b_Y\f$.
* @relates Point */
inline Coord dot(Point const &a, Point const &b)
{
return a[0] * b[0] + a[1] * b[1];
}
/** @brief Compute the 2D cross product.
* Defined as dot(a, b.cw()). This means it will be zero for parallel vectors,
* and its absolute value highest for perpendicular vectors.
* @relates Point*/
inline Coord cross(Point const &a, Point const &b)
{
return dot(a, b.cw());
}
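/* Example (illustrative): for a = (1, 0) and b = (0, 1), dot(a, b) == 0 since the
 * vectors are perpendicular, while cross(a, b) == dot(a, b.cw()) == -1 under the
 * definition above. */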
/** @brief Compute the (Euclidean) distance between points.
* @relates Point */
inline Coord distance (Point const &a, Point const &b)
{
return L2(a - b);
}
/** @brief Compute the square of the distance between points.
* @relates Point */
inline Coord distanceSq (Point const &a, Point const &b)
{
return L2sq(a - b);
}
Point unit_vector(Point const &a);
Coord L1(Point const &p);
Coord LInfty(Point const &p);
bool is_zero(Point const &p);
bool is_unit_vector(Point const &p);
double atan2(Point const &p);
double angle_between(Point const &a, Point const &b);
Point abs(Point const &b);
Point constrain_angle(Point const &A, Point const &B, unsigned int n = 4, Geom::Point const &dir = Geom::Point(1,0));
} /* namespace Geom */
// This is required to fix a bug in GCC 4.3.3 (and probably others) that causes the compiler
// to try to instantiate the iterator_traits template and fail. Probably it thinks that Point
// is an iterator and tries to use std::distance instead of Geom::distance.
namespace std {
template <> class iterator_traits<Geom::Point> {};
}
#endif /* !SEEN_Geom_POINT_H */
/*
Local Variables:
mode:c++
c-file-style:"stroustrup"
c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
indent-tabs-mode:nil
fill-column:99
End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
| gpl-2.0 |
binhqnguyen/lena | nsc/openbsd3/sys/ttycom.h | 6740 | /* $OpenBSD: ttycom.h,v 1.6 2003/06/02 23:28:22 millert Exp $ */
/* $NetBSD: ttycom.h,v 1.4 1996/05/19 17:17:53 jonathan Exp $ */
/*-
* Copyright (c) 1982, 1986, 1990, 1993, 1994
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ttycom.h 8.1 (Berkeley) 3/28/94
*/
#ifndef _SYS_TTYCOM_H_
#define _SYS_TTYCOM_H_
#include <sys/ioccom.h>
/*
* Tty ioctl's except for those supported only for backwards compatibility
* with the old tty driver.
*/
/*
* Window/terminal size structure. This information is stored by the kernel
* in order to provide a consistent interface, but is not used by the kernel.
*/
struct winsize {
unsigned short ws_row; /* rows, in characters */
unsigned short ws_col; /* columns, in characters */
unsigned short ws_xpixel; /* horizontal size, pixels */
unsigned short ws_ypixel; /* vertical size, pixels */
};
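/* Example (illustrative): a program can query its terminal dimensions with
 * struct winsize ws; if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0) then
 * ws.ws_row and ws.ws_col hold the size in characters. */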
#define TIOCM_LE 0001 /* line enable */
#define TIOCM_DTR 0002 /* data terminal ready */
#define TIOCM_RTS 0004 /* request to send */
#define TIOCM_ST 0010 /* secondary transmit */
#define TIOCM_SR 0020 /* secondary receive */
#define TIOCM_CTS 0040 /* clear to send */
#define TIOCM_CAR 0100 /* carrier detect */
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RNG 0200 /* ring */
#define TIOCM_RI TIOCM_RNG
#define TIOCM_DSR 0400 /* data set ready */
/* 8-10 compat */
#define TIOCEXCL _IO('t', 13) /* set exclusive use of tty */
#define TIOCNXCL _IO('t', 14) /* reset exclusive use of tty */
/* 15 unused */
#define TIOCFLUSH _IOW('t', 16, int) /* flush buffers */
/* 17-18 compat */
#define TIOCGETA _IOR('t', 19, struct termios) /* get termios struct */
#define TIOCSETA _IOW('t', 20, struct termios) /* set termios struct */
#define TIOCSETAW _IOW('t', 21, struct termios) /* drain output, set */
#define TIOCSETAF _IOW('t', 22, struct termios) /* drn out, fls in, set */
#define TIOCGETD _IOR('t', 26, int) /* get line discipline */
#define TIOCSETD _IOW('t', 27, int) /* set line discipline */
/* 127-124 compat */
#define TIOCSBRK _IO('t', 123) /* set break bit */
#define TIOCCBRK _IO('t', 122) /* clear break bit */
#define TIOCSDTR _IO('t', 121) /* set data terminal ready */
#define TIOCCDTR _IO('t', 120) /* clear data terminal ready */
#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */
#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */
/* 117-116 compat */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
#define TIOCSTI _IOW('t', 114, char) /* simulate terminal input */
#define TIOCNOTTY _IO('t', 113) /* void tty association */
#define TIOCPKT _IOW('t', 112, int) /* pty: set/clear packet mode */
#define TIOCPKT_DATA 0x00 /* data packet */
#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */
#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */
#define TIOCPKT_STOP 0x04 /* stop output */
#define TIOCPKT_START 0x08 /* start output */
#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */
#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */
#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCMSET _IOW('t', 109, int) /* set all modem bits */
#define TIOCMBIS _IOW('t', 108, int) /* bis modem bits */
#define TIOCMBIC _IOW('t', 107, int) /* bic modem bits */
#define TIOCMGET _IOR('t', 106, int) /* get all modem bits */
#define TIOCREMOTE _IOW('t', 105, int) /* remote input editing */
#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */
#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */
#define TIOCUCNTL _IOW('t', 102, int) /* pty: set/clr usr cntl mode */
#define TIOCSTAT _IOW('t', 101, int) /* generate status message */
#define UIOCCMD(n) _IO('u', n) /* usr cntl op "n" */
#define TIOCCONS _IOW('t', 98, int) /* become virtual console */
#define TIOCSCTTY _IO('t', 97) /* become controlling tty */
#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */
#define TIOCSIG _IO('t', 95) /* pty: generate signal */
#define TIOCDRAIN _IO('t', 94) /* wait till output drained */
#define TIOCGFLAGS _IOR('t', 93, int) /* get device flags */
#define TIOCSFLAGS _IOW('t', 92, int) /* set device flags */
#define TIOCFLAG_SOFTCAR 0x01 /* ignore hardware carrier */
#define TIOCFLAG_CLOCAL 0x02 /* set clocal on open */
#define TIOCFLAG_CRTSCTS 0x04 /* set crtscts on open */
#define TIOCFLAG_MDMBUF 0x08 /* set mdmbuf on open */
#define TIOCFLAG_PPS 0x10 /* call hardpps on carrier up */
/* Backwards compatibility */
#define TIOCMODG TIOCMGET
#define TIOCMODS TIOCMSET
#define TTYDISC 0 /* termios tty line discipline */
#define TABLDISC 3 /* tablet discipline */
#define SLIPDISC 4 /* serial IP discipline */
#define PPPDISC 5 /* ppp discipline */
#define STRIPDISC 6 /* metricom wireless IP discipline */
#endif /* !_SYS_TTYCOM_H_ */
| gpl-2.0 |
darkspr1te/seagate_central_cns3420_2-6-35 | include/linux/spinlock_sop.h | 402 | /*
* Since we need both UP & SMP versions of spinlock implementations in
* SOP platform, new spinlock APIs are introduced.
* - The standard spinlock API will be of UP version.
* - The standard API will be suffixed with 'smp' when there is no change in
* the original SMP implementation, else it will be suffixed with 'sop'.
*/
#include <asm/spinlock_types_sop.h>
#include <asm/spinlock_sop.h>
| gpl-2.0 |
domino-team/openwrt-cc | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/test/parallel/test-buffer-fakes.js | 994 | 'use strict';
require('../common');
const assert = require('assert');
const Buffer = require('buffer').Buffer;
function FakeBuffer() { }
Object.setPrototypeOf(FakeBuffer, Buffer);
Object.setPrototypeOf(FakeBuffer.prototype, Buffer.prototype);
const fb = new FakeBuffer();
assert.throws(function() {
Buffer.from(fb);
}, TypeError);
assert.throws(function() {
+Buffer.prototype;
}, TypeError);
assert.throws(function() {
Buffer.compare(fb, Buffer.alloc(0));
}, TypeError);
assert.throws(function() {
fb.write('foo');
}, TypeError);
assert.throws(function() {
Buffer.concat([fb, fb]);
}, TypeError);
assert.throws(function() {
fb.toString();
}, TypeError);
assert.throws(function() {
fb.equals(Buffer.alloc(0));
}, TypeError);
assert.throws(function() {
fb.indexOf(5);
}, TypeError);
assert.throws(function() {
fb.readFloatLE(0);
}, TypeError);
assert.throws(function() {
fb.writeFloatLE(0);
}, TypeError);
assert.throws(function() {
fb.fill(0);
}, TypeError);
| gpl-2.0 |
ccompiler4pic32/pic32-gcc | gcc/testsuite/gcc.target/arm/neon/vclts16.c | 614 | /* Test the `vclts16' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_neon } */
#include "arm_neon.h"
void test_vclts16 (void)
{
uint16x4_t out_uint16x4_t;
int16x4_t arg0_int16x4_t;
int16x4_t arg1_int16x4_t;
out_uint16x4_t = vclt_s16 (arg0_int16x4_t, arg1_int16x4_t);
}
/* { dg-final { scan-assembler "vcgt\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@.*\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
| gpl-2.0 |
cooler-SAI/murlocs_434 | sql_300m/world/lightwell_fix.sql | 865 | -- TODO: update for 4.x
-- DELETE FROM npc_spellclick_spells WHERE npc_entry IN (31897, 31896, 31895, 31894, 31893, 31883);
-- INSERT INTO npc_spellclick_spells (npc_entry, spell_id, cast_flags) VALUES (31897, 7001, 2), (31896, 27873, 2), (31895, 27874, 2), (31894, 28276, 2), (31893, 48084, 2), (31883, 48085, 2);
-- DELETE FROM creature_template_addon WHERE entry IN (31897, 31896, 31895, 31894, 31893, 31883);
-- INSERT INTO creature_template_addon (entry, auras) VALUES (31897, "59907 0"), (31896, "59907 0"), (31895, "59907 0"), (31894, "59907 0"), (31893, "59907 0"), (31883, "59907 0");
-- delete from spell_bonus_data where entry in (7001, 27873, 27874, 28276, 48084, 48085);
-- insert into spell_bonus_data (entry, direct_bonus, dot_bonus) VALUES (7001, 0, 0.63), (27873, 0, 0.63), (27874, 0, 0.63), (28276, 0, 0.63), (48084, 0, 0.63), (48085, 0, 0.63);
| gpl-2.0 |
felixsinger/coreboot | src/mainboard/google/volteer/variants/lindar/memory.c | 2161 | /* SPDX-License-Identifier: GPL-2.0-only */
#include <baseboard/variants.h>
#include <boardid.h>
static const struct mb_cfg board_memcfg = {
.type = MEM_TYPE_LP4X,
.lp4x_dq_map = {
.ddr0 = {
.dq0 = { 3, 1, 0, 2, 4, 6, 7, 5, }, /* DDR0_DQ0[7:0] */
.dq1 = { 12, 13, 14, 15, 8, 9, 10, 11 }, /* DDR0_DQ1[7:0] */
},
.ddr1 = {
.dq0 = { 0, 7, 1, 6, 2, 4, 3, 5, }, /* DDR1_DQ0[7:0] */
.dq1 = { 8, 15, 14, 9, 13, 10, 12, 11 }, /* DDR1_DQ1[7:0] */
},
.ddr2 = {
.dq0 = { 3, 2, 0, 1, 4, 5, 6, 7, }, /* DDR2_DQ0[7:0] */
.dq1 = { 12, 13, 15, 14, 8, 9, 10, 11 }, /* DDR2_DQ1[7:0] */
},
.ddr3 = {
.dq0 = { 6, 0, 1, 7, 5, 4, 2, 3, }, /* DDR3_DQ0[7:0] */
.dq1 = { 15, 14, 8, 9, 10, 12, 11, 13 }, /* DDR3_DQ1[7:0] */
},
.ddr4 = {
.dq0 = { 5, 0, 1, 3, 4, 2, 7, 6, }, /* DDR4_DQ0[7:0] */
.dq1 = { 11, 14, 13, 12, 8, 9, 15, 10 }, /* DDR4_DQ1[7:0] */
},
.ddr5 = {
.dq0 = { 3, 4, 2, 5, 0, 6, 1, 7, }, /* DDR5_DQ0[7:0] */
.dq1 = { 13, 12, 11, 10, 14, 15, 9, 8 }, /* DDR5_DQ1[7:0] */
},
.ddr6 = {
.dq0 = { 3, 2, 1, 0, 5, 4, 7, 6, }, /* DDR6_DQ0[7:0] */
.dq1 = { 12, 13, 15, 14, 8, 11, 9, 10 }, /* DDR6_DQ1[7:0] */
},
.ddr7 = {
.dq0 = { 3, 4, 2, 5, 1, 0, 7, 6, }, /* DDR7_DQ0[7:0] */
.dq1 = { 15, 14, 9, 8, 12, 10, 11, 13 }, /* DDR7_DQ1[7:0] */
},
},
/* DQS CPU<>DRAM map */
.lp4x_dqs_map = {
.ddr0 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR0_DQS[1:0] */
.ddr1 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR1_DQS[1:0] */
.ddr2 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR2_DQS[1:0] */
.ddr3 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR3_DQS[1:0] */
.ddr4 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR4_DQS[1:0] */
.ddr5 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR5_DQS[1:0] */
.ddr6 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR6_DQS[1:0] */
.ddr7 = { .dqs0 = 0, .dqs1 = 1 }, /* DDR7_DQS[1:0] */
},
.ect = true, /* Enable Early Command Training */
};
const struct mb_cfg *variant_memory_params(void)
{
return &board_memcfg;
}
void memcfg_variant_init(FSPM_UPD *mupd)
{
FSP_M_CONFIG *mem_cfg = &mupd->FspmConfig;
if (board_id() == 1)
mem_cfg->SaGv = 0x00;
}
| gpl-2.0 |
FanWuUCL/memory | gawk/src/extension/readdir.c | 5560 | /*
* readdir.c --- Provide an input parser to read directories
*
* Arnold Robbins
* [email protected]
* Written 7/2012
*
* Andrew Schorr and Arnold Robbins: further fixes 8/2012.
* Simplified 11/2012.
*/
/*
* Copyright (C) 2012, 2013 the Free Software Foundation, Inc.
*
* This file is part of GAWK, the GNU implementation of the
* AWK Programming Language.
*
* GAWK is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* GAWK is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#define _BSD_SOURCE
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#ifdef HAVE_DIRENT_H
#include <dirent.h>
#else
#error Cannot compile the dirent extension on this system!
#endif
#include "gawkdirfd.h"
#include "gawkapi.h"
#include "gettext.h"
#define _(msgid) gettext(msgid)
#define N_(msgid) msgid
static const gawk_api_t *api; /* for convenience macros to work */
static awk_ext_id_t *ext_id;
static const char *ext_version = "readdir extension: version 1.0";
static awk_bool_t init_readdir(void);
static awk_bool_t (*init_func)(void) = init_readdir;
int plugin_is_GPL_compatible;
/* data type for the opaque pointer: */
typedef struct open_directory {
DIR *dp;
char *buf;
} open_directory_t;
/* ftype --- return type of file as a single character string */
static const char *
ftype(struct dirent *entry)
{
#ifdef DT_BLK
switch (entry->d_type) {
case DT_BLK: return "b";
case DT_CHR: return "c";
case DT_DIR: return "d";
case DT_FIFO: return "p";
case DT_LNK: return "l";
case DT_REG: return "f";
case DT_SOCK: return "s";
default:
case DT_UNKNOWN: return "u";
}
#else
return "u";
#endif
}
/* dir_get_record --- get one record at a time out of a directory */
static int
dir_get_record(char **out, awk_input_buf_t *iobuf, int *errcode,
char **rt_start, size_t *rt_len)
{
DIR *dp;
struct dirent *dirent;
int len;
open_directory_t *the_dir;
const char *ftstr;
/*
* The caller sets *errcode to 0, so we should set it only if an
* error occurs.
*/
if (out == NULL || iobuf == NULL || iobuf->opaque == NULL)
return EOF;
the_dir = (open_directory_t *) iobuf->opaque;
dp = the_dir->dp;
/*
* Initialize errno, since readdir does not set it to zero on EOF.
*/
errno = 0;
dirent = readdir(dp);
if (dirent == NULL) {
*errcode = errno; /* in case there was an error */
return EOF;
}
#ifdef ZOS_USS
len = sprintf(the_dir->buf, "%lu/%s",
(unsigned long) dirent->d_ino,
dirent->d_name);
#else
len = sprintf(the_dir->buf, "%llu/%s",
(unsigned long long) dirent->d_ino,
dirent->d_name);
#endif
ftstr = ftype(dirent);
len += sprintf(the_dir->buf + len, "/%s", ftstr);
*out = the_dir->buf;
*rt_len = 0; /* set RT to "" */
return len;
}
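/*
 * Illustrative usage from the awk side (assuming gawk loads this extension
 * via -l/@load "readdir"): every directory entry becomes one record of the
 * form "inode/name/type", so e.g.
 *   gawk -l readdir 'BEGIN { FS = "/" } { print $2, $3 }' .
 * prints each file name followed by its single-letter type.
 */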
/* dir_close --- close up when done */
static void
dir_close(awk_input_buf_t *iobuf)
{
open_directory_t *the_dir;
if (iobuf == NULL || iobuf->opaque == NULL)
return;
the_dir = (open_directory_t *) iobuf->opaque;
closedir(the_dir->dp);
free(the_dir->buf);
free(the_dir);
iobuf->fd = -1;
}
/* dir_can_take_file --- return true if we want the file */
static awk_bool_t
dir_can_take_file(const awk_input_buf_t *iobuf)
{
if (iobuf == NULL)
return awk_false;
return (iobuf->fd != INVALID_HANDLE && S_ISDIR(iobuf->sbuf.st_mode));
}
/*
* dir_take_control_of --- set up input parser.
* We can assume that dir_can_take_file just returned true,
* and no state has changed since then.
*/
static awk_bool_t
dir_take_control_of(awk_input_buf_t *iobuf)
{
DIR *dp;
open_directory_t *the_dir;
size_t size;
errno = 0;
#ifdef HAVE_FDOPENDIR
dp = fdopendir(iobuf->fd);
#else
dp = opendir(iobuf->name);
if (dp != NULL)
iobuf->fd = dirfd(dp);
#endif
if (dp == NULL) {
warning(ext_id, _("dir_take_control_of: opendir/fdopendir failed: %s"),
strerror(errno));
update_ERRNO_int(errno);
return awk_false;
}
emalloc(the_dir, open_directory_t *, sizeof(open_directory_t), "dir_take_control_of");
the_dir->dp = dp;
size = sizeof(struct dirent) + 21 /* max digits in inode */ + 2 /* slashes */;
emalloc(the_dir->buf, char *, size, "dir_take_control_of");
iobuf->opaque = the_dir;
iobuf->get_record = dir_get_record;
iobuf->close_func = dir_close;
return awk_true;
}
static awk_input_parser_t readdir_parser = {
"readdir",
dir_can_take_file,
dir_take_control_of,
NULL
};
#ifdef TEST_DUPLICATE
static awk_input_parser_t readdir_parser2 = {
"readdir2",
dir_can_take_file,
dir_take_control_of,
NULL
};
#endif
/* init_readdir --- set things ups */
static awk_bool_t
init_readdir()
{
register_input_parser(& readdir_parser);
#ifdef TEST_DUPLICATE
register_input_parser(& readdir_parser2);
#endif
return awk_true;
}
static awk_ext_func_t func_table[] = {
{ NULL, NULL, 0 }
};
/* define the dl_load function using the boilerplate macro */
dl_load_func(func_table, readdir, "")
| gpl-2.0 |
hroark13/android_kernel_zte_draconis | drivers/cpufreq/Makefile | 3520 | # CPUfreq core
obj-$(CONFIG_CPU_FREQ) += cpufreq.o cpu-boost.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
# CPUfreq governors
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
obj-$(CONFIG_CPU_FREQ_GOV_ADAPTIVE) += cpufreq_adaptive.o
obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o
obj-$(CONFIG_CPU_FREQ_GOV_DARKNESS) += cpufreq_darkness.o
obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o
obj-$(CONFIG_CPU_FREQ_GOV_HYPER) += cpufreq_hyper.o
obj-$(CONFIG_CPU_FREQ_GOV_INTELLIACTIVE)+= cpufreq_intelliactive.o
obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND)+= cpufreq_intellidemand.o
obj-$(CONFIG_CPU_FREQ_GOV_INTELLIMM) += cpufreq_intellimm.o
obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o
obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o
obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o
obj-$(CONFIG_CPU_FREQ_GOV_OPTIMAX) += cpufreq_optimax.o
obj-$(CONFIG_CPU_FREQ_GOV_PEGASUSQ) += cpufreq_pegasusq.o
obj-$(CONFIG_CPU_FREQ_GOV_TRIPNDROID) += cpufreq_tripndroid.o
obj-$(CONFIG_CPU_FREQ_GOV_UBERDEMAND) += cpufreq_uberdemand.o
obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
obj-$(CONFIG_X86_LONGRUN) += longrun.o
obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
##################################################################################
# ARM SoC drivers
obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
##################################################################################
# PowerPC platform drivers
obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
obj-$(CONFIG_MSM_DCVS) += cpufreq_gov_msm.o
| gpl-2.0 |
haisuu111/shop | wp-content/themes/layerswp/functions.php | 13786 | <?php
/**
* @package Layers
*/
/**
* Add define Layers constants to be used around Layers themes, plugins etc.
*/
/**
* The current version of the theme. Use a random number for SCRIPT_DEBUG mode
*/
define( 'LAYERS_VERSION', '1.2.2' );
define( 'LAYERS_TEMPLATE_URI' , get_template_directory_uri() );
define( 'LAYERS_TEMPLATE_DIR' , get_template_directory() );
define( 'LAYERS_THEME_TITLE' , 'Layers' );
define( 'LAYERS_THEME_SLUG' , 'layers' );
define( 'LAYERS_BUILDER_TEMPLATE' , 'builder.php' );
/**
* Set the content width based on the theme's design and stylesheet.
*/
if ( ! isset( $content_width ) )
$content_width = 1080; /* pixels */
/**
* Adjust the content width when the full width page template is being used
*/
function layers_set_content_width() {
global $content_width;
$left_sidebar_active = layers_can_show_sidebar( 'left-sidebar' );
$right_sidebar_active = layers_can_show_sidebar( 'right-sidebar' );
if( is_page_template( LAYERS_BUILDER_TEMPLATE ) ) {
$content_width = 1080;
} else if( is_page_template( 'template-both-sidebar.php' ) ||
is_page_template( 'template-left-sidebar.php' ) ||
is_page_template( 'template-right-sidebar.php' ) ){
$content_width = 660;
} elseif ( is_page_template( 'template-blog.php' ) ) {
$content_width = 1080;
} elseif( $left_sidebar_active || $right_sidebar_active ){
$content_width = 660;
}
}
add_action( 'template_redirect', 'layers_set_content_width' );
/*
* Third Party Scripts
*/
require_once get_template_directory() . '/core/third-party/site-logo.php';
/*
* Load Widgets
*/
require_once get_template_directory() . '/core/widgets/init.php';
/*
* Load Customizer Support
*/
require_once get_template_directory() . '/core/customizer/init.php';
/*
* Load Custom Post Meta
*/
require_once get_template_directory() . '/core/meta/init.php';
/*
* Load Front-end helpers
*/
require_once get_template_directory() . '/core/helpers/color.php';
require_once get_template_directory() . '/core/helpers/custom-fonts.php';
require_once get_template_directory() . '/core/helpers/extensions.php';
require_once get_template_directory() . '/core/helpers/post.php';
require_once get_template_directory() . '/core/helpers/post-types.php';
require_once get_template_directory() . '/core/helpers/sanitization.php';
require_once get_template_directory() . '/core/helpers/template.php';
require_once get_template_directory() . '/core/helpers/woocommerce.php';
/*
* Load Admin-specific files
*/
if( is_admin() ){
// Include form item class
require_once get_template_directory() . '/core/helpers/forms.php';
// Include design bar class
require_once get_template_directory() . '/core/helpers/design-bar.php';
// Include API class
require_once get_template_directory() . '/core/helpers/api.php';
// Include widget export/import class
require_once get_template_directory() . '/core/helpers/migrator.php';
//Load Options Panel
require_once get_template_directory() . '/core/options-panel/init.php';
}
if( ! function_exists( 'layers_setup' ) ) {
function layers_setup(){
global $pagenow;
/**
* Add support for HTML5
*/
add_theme_support('html5');
/**
* Add support for Title Tags
*/
add_theme_support('title-tag');
/**
* Add support for widgets inside the customizer
*/
add_theme_support('widget-customizer');
/**
* Add support for WooCommerce
*/
add_theme_support( 'woocommerce' );
/**
* Add support for featured images
*/
add_theme_support( 'post-thumbnails' );
// Set Large Image Sizes
add_image_size( 'layers-square-large', 1000, 1000, true );
add_image_size( 'layers-portrait-large', 720, 1000, true );
add_image_size( 'layers-landscape-large', 1000, 720, true );
// Set Medium Image Sizes
add_image_size( 'layers-square-medium', 480, 480, true );
add_image_size( 'layers-portrait-medium', 340, 480, true );
add_image_size( 'layers-landscape-medium', 480, 340, true );
/**
* Add text domain
*/
load_theme_textdomain('layerswp', get_template_directory() . '/languages');
/**
* Add theme support
*/
// Custom Site Logo
add_theme_support( 'site-logo', array(
'header-text' => array(
'sitetitle',
'tagline',
),
'size' => 'medium',
) );
// Automatic Feed Links
add_theme_support( 'automatic-feed-links' );
/**
* Register nav menus
*/
register_nav_menus( array(
LAYERS_THEME_SLUG . '-secondary-left' => __( 'Top Left Menu' , 'layerswp' ),
LAYERS_THEME_SLUG . '-secondary-right' => __( 'Top Right Menu' , 'layerswp' ),
LAYERS_THEME_SLUG . '-primary' => __( 'Header Menu' , 'layerswp' ),
LAYERS_THEME_SLUG . '-primary-right' => __( 'Right Header Menu' , 'layerswp' ),
LAYERS_THEME_SLUG . '-footer' => __( 'Footer Menu' , 'layerswp' ),
) );
/**
* Welcome Redirect
*/
if( isset($_GET["activated"]) && $pagenow == "themes.php" ) { //&& '' == get_option( 'layers_welcome' )
update_option( 'layers_welcome' , 1);
wp_safe_redirect( admin_url('admin.php?page=' . LAYERS_THEME_SLUG . '-get-started'));
}
} // function layers_setup
} // if !function layers_setup
add_action( 'after_setup_theme' , 'layers_setup', 10 );
/**
* Register widget areas (sidebars)
*/
if( ! function_exists( 'layers_register_standard_sidebars' ) ) {
function layers_register_standard_sidebars(){
/**
* Register Standard Sidebars
*/
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-off-canvas-sidebar',
'name' => __( 'Mobile Sidebar' , 'layerswp' ),
'description' => __( 'This sidebar will only appear on mobile devices.' , 'layerswp' ),
'before_widget' => '<aside id="%1$s" class="content widget %2$s">',
'after_widget' => '</aside>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-left-sidebar',
'name' => __( 'Left Sidebar' , 'layerswp' ),
'before_widget' => '<aside id="%1$s" class="content well push-bottom-large widget %2$s">',
'after_widget' => '</aside>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-right-sidebar',
'name' => __( 'Right Sidebar' , 'layerswp' ),
'before_widget' => '<aside id="%1$s" class="content well push-bottom-large widget %2$s">',
'after_widget' => '</aside>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
/**
* Register Footer Sidebars
*/
for( $footer = 1; $footer < 5; $footer++ ) {
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-footer-' . $footer,
'name' => __( 'Footer ', 'layerswp' ) . $footer,
'before_widget' => '<section id="%1$s" class="widget %2$s">',
'after_widget' => '</section>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
} // for footers
/**
* Register WooCommerce Sidebars
*/
if( class_exists( 'WooCommerce' ) ) {
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-left-woocommerce-sidebar',
'name' => __( 'Left Shop Sidebar' , 'layerswp' ),
'description' => '',
'before_widget' => '<aside id="%1$s" class="content well push-bottom-large widget %2$s">',
'after_widget' => '</aside>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
register_sidebar( array(
'id' => LAYERS_THEME_SLUG . '-right-woocommerce-sidebar',
'name' => __( 'Right Shop Sidebar' , 'layerswp' ),
'description' => '',
'before_widget' => '<aside id="%1$s" class="content well push-bottom-large widget %2$s">',
'after_widget' => '</aside>',
'before_title' => '<h5 class="section-nav-title">',
'after_title' => '</h5>',
) );
}
}
}
add_action( 'widgets_init' , 'layers_register_standard_sidebars' , 50 );
/**
* Enqueue front end styles and scripts
*/
if( ! function_exists( 'layers_scripts' ) ) {
function layers_scripts(){
/**
* Front end Scripts
*/
wp_enqueue_script(
LAYERS_THEME_SLUG . '-plugins-js' ,
get_template_directory_uri() . '/assets/js/plugins.js',
array(
'jquery',
),
LAYERS_VERSION
); // Sticky-Kit
wp_enqueue_script(
LAYERS_THEME_SLUG . '-framework-js' ,
get_template_directory_uri() . '/assets/js/layers.framework.js',
array(
'jquery',
),
LAYERS_VERSION,
true
); // Framework
if ( is_singular() && comments_open() && get_option( 'thread_comments' ) ) {
wp_enqueue_script( 'comment-reply' );
} // Comment reply script
/**
* Front end Styles
*/
wp_enqueue_style(
LAYERS_THEME_SLUG . '-framework' ,
get_template_directory_uri() . '/assets/css/framework.css',
array() ,
LAYERS_VERSION
);
wp_enqueue_style(
LAYERS_THEME_SLUG . '-components',
get_template_directory_uri() . '/assets/css/components.css',
array(),
LAYERS_VERSION
); // Components
wp_enqueue_style(
LAYERS_THEME_SLUG . '-responsive',
get_template_directory_uri() . '/assets/css/responsive.css',
array(),
LAYERS_VERSION
); // Responsive
wp_enqueue_style(
LAYERS_THEME_SLUG . '-icon-fonts',
get_template_directory_uri() . '/assets/css/layers-icons.css',
array(),
LAYERS_VERSION
); // Icon Font
if( class_exists( 'WooCommerce' ) ) {
wp_enqueue_style(
LAYERS_THEME_SLUG . '-woocommerce',
get_template_directory_uri() . '/assets/css/woocommerce.css',
array(),
LAYERS_VERSION
); // Woocommerce
}
wp_enqueue_style(
LAYERS_THEME_SLUG . '-style' ,
get_stylesheet_uri(),
array() ,
LAYERS_VERSION
);
if( is_admin_bar_showing() ) {
wp_enqueue_style(
LAYERS_THEME_SLUG . '-admin',
get_template_directory_uri() . '/core/assets/icons.css',
array(),
LAYERS_VERSION
); // Admin CSS
}
}
}
add_action( 'wp_enqueue_scripts' , 'layers_scripts' );
/**
* Enqueue admin end styles and scripts
*/
if( ! function_exists( 'layers_admin_scripts' ) ) {
function layers_admin_scripts(){
wp_enqueue_style(
LAYERS_THEME_SLUG . '-admin',
get_template_directory_uri() . '/core/assets/admin.css',
array(),
LAYERS_VERSION
); // Admin CSS
wp_enqueue_style(
LAYERS_THEME_SLUG . '-admin-editor',
get_template_directory_uri() . '/core/assets/editor.css',
array(),
LAYERS_VERSION
); // Inline Editor
wp_enqueue_style(
LAYERS_THEME_SLUG . '-admin-font-awesome',
get_template_directory_uri() . '/core/assets/font-awesome.min.css',
array(),
LAYERS_VERSION
); // Font Awesome
wp_enqueue_script(
LAYERS_THEME_SLUG . '-admin-editor' ,
get_template_directory_uri() . '/core/assets/editor.min.js' ,
array( 'jquery' ),
LAYERS_VERSION,
true
); // Inline Editor
wp_enqueue_script(
LAYERS_THEME_SLUG . '-admin-migrator' ,
get_template_directory_uri() . '/core/assets/migrator.js' ,
array(
'media-upload'
),
LAYERS_VERSION,
true
);
wp_localize_script(
LAYERS_THEME_SLUG . '-admin-migrator',
'migratori18n',
array(
'loading_message' => __( 'Be patient while we import the widget data and images.' , 'layerswp' ),
'complete_message' => __( 'Import Complete' , 'layerswp' ),
'importing_message' => __( 'Importing Your Content' , 'layerswp' ),
'duplicate_complete_message' => __( 'Edit Your New Page' , 'layerswp' )
)
); // Migrator localization strings
wp_localize_script(
LAYERS_THEME_SLUG . '-admin-migrator',
"layers_migrator_params",
array(
'duplicate_layout_nonce' => wp_create_nonce( 'layers-migrator-duplicate' ),
'import_layout_nonce' => wp_create_nonce( 'layers-migrator-import' ),
'preset_layout_nonce' => wp_create_nonce( 'layers-migrator-preset-layouts' ),
)
);
// Onboarding Process
wp_enqueue_script(
LAYERS_THEME_SLUG . '-admin-onboarding' ,
get_template_directory_uri() . '/core/assets/onboarding.js',
array(
'jquery'
),
LAYERS_VERSION,
true
); // Onboarding JS
wp_localize_script(
LAYERS_THEME_SLUG . '-admin-onboarding' ,
"layers_onboarding_params",
array(
'preset_layout_nonce' => wp_create_nonce( 'layers-migrator-preset-layouts' ),
'update_option_nonce' => wp_create_nonce( 'layers-onboarding-update-options' ),
'set_theme_mod_nonce' => wp_create_nonce( 'layers-onboarding-set-theme-mods' ),
)
); // Onboarding ajax parameters
wp_localize_script(
LAYERS_THEME_SLUG . '-admin-onboarding' ,
'onboardingi18n',
array(
'step_saving_message' => __( 'Saving...' , 'layerswp' ),
'step_done_message' => __( 'Done!' , 'layerswp' )
)
); // Onboarding localization
wp_enqueue_script(
LAYERS_THEME_SLUG . '-admin' ,
get_template_directory_uri() . '/core/assets/admin.js',
array(
'jquery',
'jquery-ui-sortable',
'wp-color-picker',
),
LAYERS_VERSION,
true
); // Admin JS
wp_localize_script(
LAYERS_THEME_SLUG . '-admin' ,
"layers_admin_params",
array(
'backup_pages_nonce' => wp_create_nonce( 'layers-backup-pages' ),
'backup_pages_success_message' => __('Your pages have been successfully backed up!', 'layerswp' )
)
); // Admin ajax parameters
wp_enqueue_media();
}
}
add_action( 'customize_controls_print_footer_scripts' , 'layers_admin_scripts' );
add_action( 'admin_enqueue_scripts' , 'layers_admin_scripts' );
/**
* Make sure that all excerpts have class="excerpt"
*/
if( !function_exists( 'layers_excerpt_class' ) ) {
function layers_excerpt_class( $excerpt ) {
return str_replace('<p', '<p class="excerpt"', $excerpt);
}
} // layers_excerpt_class
add_filter( "the_excerpt", "layers_excerpt_class" );
add_filter( "get_the_excerpt", "layers_excerpt_class" );
| gpl-2.0 |
SolidRun/lede-project | target/linux/mpc85xx/base-files/lib/mpc85xx.sh | 623 | #!/bin/sh
#
# Copyright (C) 2013 OpenWrt.org
#
MPC85XX_BOARD_NAME=
MPC85XX_MODEL=
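# Parse the "model" line from /proc/cpuinfo to identify the board and
# publish the result under /tmp/sysinfo/ for the rest of the system.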
mpc85xx_board_detect() {
local model
local name
model=$(awk 'BEGIN{FS="[ \t]+:[ \t]"} /model/ {print $2}' /proc/cpuinfo)
case "$model" in
*"HiveAP-330")
name="hiveap-330"
;;
*"TL-WDR4900 v1")
name="tl-wdr4900-v1"
;;
esac
[ -z "$name" ] && name="unknown"
[ -z "$MPC85XX_BOARD_NAME" ] && MPC85XX_BOARD_NAME="$name"
[ -z "$MPC85XX_MODEL" ] && MPC85XX_MODEL="$model"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$MPC85XX_BOARD_NAME" > /tmp/sysinfo/board_name
echo "$MPC85XX_MODEL" > /tmp/sysinfo/model
}
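# Illustrative usage sketch (not part of the original script), assuming this
# file is installed as /lib/mpc85xx.sh:
#   . /lib/mpc85xx.sh
#   mpc85xx_board_detect
#   board="$(cat /tmp/sysinfo/board_name)"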
| gpl-2.0 |
tossp/lede-k3 | package/lean/mt/drivers/mt7612e/src/mt76x2/common/cmm_loft_cal.c | 39690 | /*
***************************************************************************
* Ralink Tech Inc.
* 4F, No. 2 Technology 5th Rd.
* Science-based Industrial Park
* Hsin-chu, Taiwan, R.O.C.
*
* (c) Copyright 2002-2009, Ralink Technology, Inc.
*
* All rights reserved. Ralink's source code is an unpublished work and the
* use of a copyright notice does not imply otherwise. This source code
* contains confidential trade secret material of Ralink Tech. Any attempt
* or participation in deciphering, decoding, reverse engineering or in any
* way altering the source code is strictly prohibited, unless the prior
* written consent of Ralink Technology, Inc. is obtained.
***************************************************************************
Module Name:
cmm_loft_cal.c
Abstract:
Tx LOFT calibration and profile related functions
Revision History:
Who When What
-------- ---------- ----------------------------------------------
*/
#include "rt_config.h"
#ifdef RT6352
#define I_PATH 0x0
#define Q_PATH 0x1
#define CHAIN_0 0x0
#define CHAIN_1 0x1
#define RF_ALC_NUM 6
#define CHAIN_NUM 2
#define _BBP_REG_NUM 168
typedef struct _RF_REG_PAIR
{
UCHAR Bank;
UCHAR Register;
UCHAR Value;
} RF_REG_PAIR, *PRF_REG_PAIR;
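/*
* Save the RF registers that the calibration is about to overwrite for the
* given chain (bank/register/value triplets) so they can be restored later.
*/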
static VOID RFConfigStore(
IN RTMP_ADAPTER *pAd,
OUT RF_REG_PAIR rf_reg_record[][13],
IN UCHAR chain)
{
UCHAR RFValue = 0;
if (chain == CHAIN_0)
{
/* save before config */
RT635xReadRFRegister(pAd, RF_BANK0, RF_R01, &RFValue);
rf_reg_record[CHAIN_0][0].Bank = RF_BANK0;
rf_reg_record[CHAIN_0][0].Register = RF_R01;
rf_reg_record[CHAIN_0][0].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R02, &RFValue);
rf_reg_record[CHAIN_0][1].Bank = RF_BANK0;
rf_reg_record[CHAIN_0][1].Register = RF_R02;
rf_reg_record[CHAIN_0][1].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R35, &RFValue);
rf_reg_record[CHAIN_0][2].Bank = RF_BANK0;
rf_reg_record[CHAIN_0][2].Register = RF_R35;
rf_reg_record[CHAIN_0][2].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R42, &RFValue);
rf_reg_record[CHAIN_0][3].Bank = RF_BANK0;
rf_reg_record[CHAIN_0][3].Register = RF_R42;
rf_reg_record[CHAIN_0][3].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK4, RF_R00, &RFValue);
rf_reg_record[CHAIN_0][4].Bank = RF_BANK4;
rf_reg_record[CHAIN_0][4].Register = RF_R00;
rf_reg_record[CHAIN_0][4].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK4, RF_R02, &RFValue);
rf_reg_record[CHAIN_0][5].Bank = RF_BANK4;
rf_reg_record[CHAIN_0][5].Register = RF_R02;
rf_reg_record[CHAIN_0][5].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK4, RF_R34, &RFValue);
rf_reg_record[CHAIN_0][6].Bank = RF_BANK4;
rf_reg_record[CHAIN_0][6].Register = RF_R34;
rf_reg_record[CHAIN_0][6].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R03, &RFValue);
rf_reg_record[CHAIN_0][7].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][7].Register = RF_R03;
rf_reg_record[CHAIN_0][7].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R04, &RFValue);
rf_reg_record[CHAIN_0][8].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][8].Register = RF_R04;
rf_reg_record[CHAIN_0][8].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R17, &RFValue);
rf_reg_record[CHAIN_0][9].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][9].Register = RF_R17;
rf_reg_record[CHAIN_0][9].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R18, &RFValue);
rf_reg_record[CHAIN_0][10].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][10].Register = RF_R18;
rf_reg_record[CHAIN_0][10].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R19, &RFValue);
rf_reg_record[CHAIN_0][11].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][11].Register = RF_R19;
rf_reg_record[CHAIN_0][11].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK5, RF_R20, &RFValue);
rf_reg_record[CHAIN_0][12].Bank = RF_BANK5;
rf_reg_record[CHAIN_0][12].Register = RF_R20;
rf_reg_record[CHAIN_0][12].Value = RFValue;
}
else if (chain == CHAIN_1)
{
/* save before config */
RT635xReadRFRegister(pAd, RF_BANK0, RF_R01, &RFValue);
rf_reg_record[CHAIN_1][0].Bank = RF_BANK0;
rf_reg_record[CHAIN_1][0].Register = RF_R01;
rf_reg_record[CHAIN_1][0].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R02, &RFValue);
rf_reg_record[CHAIN_1][1].Bank = RF_BANK0;
rf_reg_record[CHAIN_1][1].Register = RF_R02;
rf_reg_record[CHAIN_1][1].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R35, &RFValue);
rf_reg_record[CHAIN_1][2].Bank = RF_BANK0;
rf_reg_record[CHAIN_1][2].Register = RF_R35;
rf_reg_record[CHAIN_1][2].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK0, RF_R42, &RFValue);
rf_reg_record[CHAIN_1][3].Bank = RF_BANK0;
rf_reg_record[CHAIN_1][3].Register = RF_R42;
rf_reg_record[CHAIN_1][3].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK6, RF_R00, &RFValue);
rf_reg_record[CHAIN_1][4].Bank = RF_BANK6;
rf_reg_record[CHAIN_1][4].Register = RF_R00;
rf_reg_record[CHAIN_1][4].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK6, RF_R02, &RFValue);
rf_reg_record[CHAIN_1][5].Bank = RF_BANK6;
rf_reg_record[CHAIN_1][5].Register = RF_R02;
rf_reg_record[CHAIN_1][5].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK6, RF_R34, &RFValue);
rf_reg_record[CHAIN_1][6].Bank = RF_BANK6;
rf_reg_record[CHAIN_1][6].Register = RF_R34;
rf_reg_record[CHAIN_1][6].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R03, &RFValue);
rf_reg_record[CHAIN_1][8].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][8].Register = RF_R03;
rf_reg_record[CHAIN_1][8].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R04, &RFValue);
rf_reg_record[CHAIN_1][7].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][7].Register = RF_R04;
rf_reg_record[CHAIN_1][7].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R17, &RFValue);
rf_reg_record[CHAIN_1][9].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][9].Register = RF_R17;
rf_reg_record[CHAIN_1][9].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R18, &RFValue);
rf_reg_record[CHAIN_1][10].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][10].Register = RF_R18;
rf_reg_record[CHAIN_1][10].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R19, &RFValue);
rf_reg_record[CHAIN_1][11].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][11].Register = RF_R19;
rf_reg_record[CHAIN_1][11].Value = RFValue;
RT635xReadRFRegister(pAd, RF_BANK7, RF_R20, &RFValue);
rf_reg_record[CHAIN_1][12].Bank = RF_BANK7;
rf_reg_record[CHAIN_1][12].Register = RF_R20;
rf_reg_record[CHAIN_1][12].Value = RFValue;
}
else
{
DBGPRINT_ERR(("%s: Unknown chain = %u\n", __FUNCTION__, chain));
return;
}
return;
}
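/* Write the saved bank/register/value triplets back for both chains. */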
static VOID RFConfigRecover(RTMP_ADAPTER *pAd, RF_REG_PAIR RF_record[][13])
{
UCHAR chain_index = 0, record_index = 0;
UCHAR bank = 0, rf_register = 0, value = 0;
for (chain_index = 0; chain_index < 2; chain_index++)
{
for (record_index = 0; record_index < 13; record_index++)
{
bank = RF_record[chain_index][record_index].Bank;
rf_register = RF_record[chain_index][record_index].Register;
value = RF_record[chain_index][record_index].Value;
RT635xWriteRFRegister(pAd, bank, rf_register, value);
DBGPRINT(RT_DEBUG_TRACE, ("bank: %d, rf_register: %d, value: %x\n",
bank, rf_register, value));
}
}
return;
}
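/*
* Configure the BBP internal tone generator (tone index, subcarrier index,
* I/Q amplitude) used as the stimulus for the loopback calibration.
*/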
static VOID SetBbpToneGenerator(
IN RTMP_ADAPTER *pAd)
{
UCHAR BBPValue = 0;
/* choose FFT tone generator index (to 0) */
BBPValue = 0xaa;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
BBPValue = 0x00;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
/* set subcarrier index (subsample ratio to 10 * 0.3125MHz) */
BBPValue = 0xab;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
BBPValue = 0x0a;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
/* I/Q amplitude of tone gen0 */
BBPValue = 0xac;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue); /* I signal */
BBPValue = 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
BBPValue = 0xad;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue); /* Q signal */
BBPValue = 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
/* enable tone gen */
BBPValue = 0x40;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R244, BBPValue);
return;
}
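/*
* Kick off a BBP FFT capture, poll for completion, then read the FFT output
* at the given tone index and return its power (I^2 + Q^2). When read_neg is
* set, the mirrored tone (0x40 - tone_idx) is read as well and the two
* powers are averaged.
*/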
UINT32 Do_FFT_Accumulation(IN RTMP_ADAPTER *pAd, UCHAR tone_idx, UCHAR read_neg)
{
UINT32 MacValue = 0;
int fftout_i = 0, fftout_q = 0;
UINT32 power_tmp=0, power_int = 0;
UCHAR BBPValue = 0;
UCHAR tone_idx_int;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x00);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x9b);
BBPValue = 0x9b;
while(BBPValue == 0x9b)
{
RtmpusecDelay(10);
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R159, &BBPValue);
BBPValue = BBPValue &0xff;
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xba);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_IO_READ32(pAd, 0x057C, &MacValue);
fftout_i = (MacValue >> 16);
fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
fftout_q = (MacValue & 0xffff);
fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
power_tmp = (fftout_i * fftout_i);
power_tmp = power_tmp + (fftout_q * fftout_q);
power_int = power_tmp;
DBGPRINT(RT_DEBUG_TRACE, ("I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, power_int));
if(read_neg){
power_int = power_int >> 1;
tone_idx_int = 0x40 - tone_idx;
tone_idx_int = tone_idx_int & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xba);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx_int);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx_int);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx_int);
RTMP_IO_READ32(pAd, 0x057C, &MacValue);
fftout_i = (MacValue >> 16);
fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
fftout_q = (MacValue & 0xffff);
fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
power_tmp = (fftout_i * fftout_i);
power_tmp = power_tmp + (fftout_q * fftout_q);
power_tmp = power_tmp >> 1;
power_int = power_int + power_tmp;
}
return power_int;
}
#ifdef RT6352_EP_SUPPORT
UINT32 Read_FFT_Accumulation(IN RTMP_ADAPTER *pAd, UCHAR tone_idx)
{
UINT32 MacValue = 0;
int fftout_i = 0, fftout_q = 0;
UINT32 power_tmp=0, power_int = 0;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xba);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, tone_idx);
RTMP_IO_READ32(pAd, 0x057C, &MacValue);
fftout_i = (MacValue >> 16);
fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
fftout_q = (MacValue & 0xffff);
fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
power_tmp = (fftout_i * fftout_i);
power_tmp = power_tmp + (fftout_q * fftout_q);
power_int = power_tmp;
DBGPRINT(RT_DEBUG_TRACE, ("I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, power_int));
return power_int;
}
#endif /* RT6352_EP_SUPPORT */
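/*
* Program one TX LOFT DC-offset code: select the ALC entry via BBP_R158/159
* (0xb0, alc | 0x80), then write the I- or Q-path value (0xb1/0xb2 for
* chain 0, 0xb8/0xb9 for chain 1).
*/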
static VOID Write_DC(
RTMP_ADAPTER *pAd,
UCHAR chain_idx,
UCHAR alc,
UCHAR iorq,
UCHAR dc)
{
UCHAR BBPValue = 0;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb0);
BBPValue = alc | 0x80;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
if (chain_idx == 0)
BBPValue = (iorq == 0) ? 0xb1: 0xb2;
else
BBPValue = (iorq == 0) ? 0xb8: 0xb9;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
BBPValue = dc;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
return;
}
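/*
* Bit-by-bit (successive approximation) search for the I and Q DC-offset
* codes that minimise the carrier leakage power measured at tone index 0xa.
* The winning 6-bit codes are stored in dc_result[chain][alc][I/Q path].
*/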
static VOID LOFT_Search(
IN RTMP_ADAPTER *pAd,
IN UCHAR chain_idx,
IN UCHAR alc_idx,
OUT UCHAR dc_result[][RF_ALC_NUM][2])
{
UINT32 pwr0 = 0, pwr1 = 0, pwr_final = 0;
CHAR index0 = 0,index1 = 0;
UCHAR index_final[] = {0x00, 0x00};
UCHAR inverted_bit = 0x20;
UCHAR iorq;
CHAR bit_index;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x80);
for (bit_index = 5; bit_index >= 0; bit_index--)
{
for (iorq = 0; iorq <= 1; iorq++)
{
DBGPRINT(RT_DEBUG_INFO, ("========================================================\n"));
if (index_final[iorq] == 0x20)
{
index0 = 0x20;
pwr0 = pwr_final;
}
else
{
index0 = index_final[iorq] - inverted_bit;
index0 = index0 & 0x3f;
Write_DC(pAd, chain_idx, 0, iorq, index0);
pwr0 = Do_FFT_Accumulation(pAd,0xa,0);
}
index1 = index_final[iorq] + ((bit_index == 5) ? 0 : inverted_bit);
index1 = index1 & 0x3f;
Write_DC(pAd, chain_idx, 0, iorq, index1);
pwr1 = Do_FFT_Accumulation(pAd, 0xa,0);
DBGPRINT(RT_DEBUG_INFO, ("alc=%u, IorQ=%u, idx_final=%2x\n", alc_idx, iorq, index_final[iorq]));
DBGPRINT(RT_DEBUG_INFO, ("pwr0=%x, pwr1=%x, pwr_final=%x, idx_0=%x, idx_1=%x, inverted_bit=%x !\n"
,pwr0, pwr1, pwr_final, index0, index1, inverted_bit));
if ((bit_index != 5) && (pwr_final <= pwr0) && (pwr_final < pwr1))
{
pwr_final = pwr_final;
index_final[iorq] = index_final[iorq];
}
else if (pwr0 < pwr1)
{
pwr_final = pwr0;
index_final[iorq] = index0 & 0x3f;
}
else
{
pwr_final = pwr1;
index_final[iorq] = index1 & 0x3f;
}
DBGPRINT(RT_DEBUG_INFO, ("IorQ=%u, idx_final[%u]:%x, pwr_final:%8x\n"
, iorq, iorq, index_final[iorq], pwr_final));
Write_DC(pAd, chain_idx, 0, iorq, index_final[iorq]);
}
inverted_bit = inverted_bit >> 1;
}
dc_result[chain_idx][alc_idx][0] = index_final[0];
dc_result[chain_idx][alc_idx][1] = index_final[1];
return;
}
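/*
* Search for the TX gain (4-bit) and phase (6-bit) imbalance codes that
* minimise the image power measured at tone index 0x14: a coarse bit search
* first, then a fine sweep around the coarse result.
*/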
static VOID IQ_Search(
IN RTMP_ADAPTER *pAd,
IN UCHAR chain_idx,
OUT UCHAR *gain_error_store,
OUT UCHAR *phase_error_store)
{
UINT32 pwr0 = 0, pwr1 = 0, pwr_final = 0;
CHAR phase_err = 0, gain_err = 0, iq_err = 0;
CHAR phase_err_fine = 0, gain_err_fine = 0;
CHAR phase_st,phase_end;
CHAR gain_st,gain_end;
UCHAR inverted_bit = 0x20;
UCHAR first_search = 0x00, touch_neg_max = 0x00;
UCHAR index0 = 0, index1 = 0;
UCHAR gain_or_phase;
UCHAR BBPValue = 0;
CHAR bit_index;
DBGPRINT(RT_DEBUG_ERROR, ("IQCalibration Start!\n"));
for (bit_index = 5; bit_index >= 1; bit_index--)
{
for (gain_or_phase = 0; gain_or_phase < 2; gain_or_phase++)
{
DBGPRINT(RT_DEBUG_INFO, ("========================================================\n"));
if ((gain_or_phase == 1) || (bit_index < 4))
{
if (gain_or_phase == 0)
iq_err = gain_err;
else
iq_err = phase_err;
first_search = (gain_or_phase == 0) ? (bit_index == 3) : (bit_index == 5);
touch_neg_max = (gain_or_phase) ? ((iq_err & 0x0f) == 0x8) : ((iq_err & 0x3f) == 0x20);
if (touch_neg_max)
{
pwr0 = pwr_final;
index0 = iq_err;
}
else
{
index0 = iq_err - inverted_bit;
BBPValue = (chain_idx == 0) ?
((gain_or_phase == 0) ? 0x28 : 0x29):
((gain_or_phase == 0) ? 0x46 : 0x47);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, index0);
pwr0 = Do_FFT_Accumulation(pAd,0x14,1);
}
index1 = iq_err + (first_search ? 0 : inverted_bit);
index1 = (gain_or_phase == 0) ? (index1 & 0xf) : (index1 & 0x3f);
BBPValue = (chain_idx == 0) ?
(gain_or_phase == 0) ? 0x28 : 0x29 :
(gain_or_phase == 0) ? 0x46 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, index1);
pwr1 = Do_FFT_Accumulation(pAd,0x14,1);
DBGPRINT(RT_DEBUG_INFO, ("pwr0=%x, pwr1=%x, pwer_final=%x, index0=%x, index1=%x, iq_err=%x, gain_or_phase=%d, inverted_bit=%x !\n",
pwr0, pwr1, pwr_final, index0, index1, iq_err, gain_or_phase, inverted_bit));
if ((!first_search) && (pwr_final <= pwr0) && (pwr_final < pwr1))
{
pwr_final = pwr_final;
}
else if (pwr0 < pwr1)
{
pwr_final = pwr0;
iq_err = index0;
}
else
{
pwr_final = pwr1;
iq_err = index1;
}
BBPValue = (chain_idx == 0) ?
(gain_or_phase == 0) ? 0x28 : 0x29 :
(gain_or_phase == 0) ? 0x46 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, iq_err);
if (gain_or_phase == 0)
gain_err = iq_err;
else
phase_err = iq_err;
DBGPRINT(RT_DEBUG_INFO, ("IQCalibration pwr_final=%8x (%2x, %2x) !\n"
, pwr_final, gain_err & 0xf, phase_err & 0x3f));
} /* end if */
} /* end gain_or_phase */
if(bit_index > 0)
inverted_bit = (inverted_bit >> 1);
}
gain_err = (gain_err & 0x8 ) ? (gain_err & 0xf ) - 0x10 : (gain_err & 0xf );
phase_err = (phase_err & 0x20) ? (phase_err & 0x3f) - 0x40 : (phase_err & 0x3f);
gain_err = (gain_err < -0x7) ? -0x7 :
(gain_err > 0x5) ? 0x5 :
gain_err;
gain_st = gain_err - 1;
gain_end = gain_err + 2;
phase_err =(phase_err < -0x1f) ? -0x1f :
(phase_err > 0x1d) ? 0x1d :
phase_err;
phase_st = phase_err - 1;
phase_end = phase_err + 2;
for(gain_err_fine = gain_st; gain_err_fine <= gain_end; gain_err_fine = gain_err_fine + 1)
for(phase_err_fine = phase_st; phase_err_fine <= phase_end; phase_err_fine = phase_err_fine + 1){
BBPValue = (chain_idx == 0) ? 0x28 : 0x46;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, gain_err_fine & 0xf);
BBPValue = (chain_idx == 0) ? 0x29 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, phase_err_fine & 0x3f);
pwr1 = Do_FFT_Accumulation(pAd,0x14,1);
if((gain_err_fine == gain_st) && (phase_err_fine == phase_st)){
pwr_final = pwr1;
gain_err = gain_err_fine;
phase_err = phase_err_fine;
}
else if(pwr_final > pwr1){
pwr_final = pwr1;
gain_err = gain_err_fine;
phase_err = phase_err_fine;
}
DBGPRINT(RT_DEBUG_INFO, ("Fine IQCalibration pwr1=%8x pwr_final=%8x (%2x, %2x) !\n",
pwr1, pwr_final, gain_err_fine & 0xf, phase_err_fine & 0x3f));
}
gain_error_store[chain_idx] = gain_err & 0xf;
phase_error_store[chain_idx] = phase_err & 0x3f;
DBGPRINT(RT_DEBUG_ERROR, ("IQCalibration Done! CH = %u, (gain=%2x, phase=%2x)\n"
, chain_idx, gain_err & 0xf, phase_err & 0x3f));
return;
}
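/*
* RF register settings that route the TX0 chain (RF banks 0/4/5) and the
* TX1 chain (RF banks 0/6/7) into the auxiliary loopback path used during
* calibration.
*/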
static VOID RF_AUX_TX0_LOOPBACK(RTMP_ADAPTER *pAd)
{
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R01, 0x21);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R02, 0x10);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R35, 0x00);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R42, 0x1b);
RT635xWriteRFRegister(pAd, RF_BANK4, RF_R00, 0x81);
RT635xWriteRFRegister(pAd, RF_BANK4, RF_R02, 0x81);
RT635xWriteRFRegister(pAd, RF_BANK4, RF_R34, 0xee);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R03, 0x2d);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R04, 0x2d);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R17, 0x80);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R18, 0xd7);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R19, 0xa2);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R20, 0x20);
}
static VOID RF_AUX_TX1_LOOPBACK(RTMP_ADAPTER *pAd)
{
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R01, 0x22);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R02, 0x20);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R35, 0x00);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R42, 0x4b);
RT635xWriteRFRegister(pAd, RF_BANK6, RF_R00, 0x81);
RT635xWriteRFRegister(pAd, RF_BANK6, RF_R02, 0x81);
RT635xWriteRFRegister(pAd, RF_BANK6, RF_R34, 0xee);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R03, 0x2d);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R04, 0x2d);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R17, 0x80);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R18, 0xd7);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R19, 0xa2);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R20, 0x20);
}
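/*
* Top-level TX LOFT and IQ imbalance calibration: back up the MAC/BBP/RF
* state, drive a tone through the internal loopback for each chain, run the
* DC-offset and gain/phase searches, write the compensation codes back to
* the BBP, then restore the original configuration.
*/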
VOID LOFT_IQ_Calibration(RTMP_ADAPTER *pAd)
{
RF_REG_PAIR RF_Store[CHAIN_NUM][13];
UINT32 MacOrg1 = 0; /* TX_PIN_CFG */
UINT32 MacOrg2 = 0; /* RF_CONTROL0 */
UINT32 MacOrg3 = 0; /* RTMP_RF_BYPASS0 */
UINT32 MacOrg4 = 0; /* RF_CONTROL3 */
UINT32 MacOrg5 = 0; /* RF_BYPASS3 */
UINT32 ORIG528 = 0;
UINT32 ORIG52C = 0;
UINT32 saveMacSysCtrl = 0,MTxCycle = 0;
UINT32 MacValue = 0;
UINT32 Mac13b8 = 0;
// UINT32 table_dc_i0 = 0;
// UINT32 table_dc_q0 = 0;
// UINT32 table_dc_i1 = 0;
// UINT32 table_dc_q1 = 0;
UINT32 pwr0 = 0, pwr1 = 0;
#ifdef RT6352_EP_SUPPORT
UINT32 pwr0_idx10= 0, pwr1_idx10 = 0;
#endif /* RT6352_EP_SUPPORT */
UCHAR RFValue;
UCHAR LOFT_DC_Search_Result[CHAIN_NUM][RF_ALC_NUM][2]; /* 0: I_PATH; 1: Q_PATH */
UCHAR gain_error_result[CHAIN_NUM], phase_error_result[CHAIN_NUM];
UCHAR RF_gain[] = {0x0, 0x1, 0x2, 0x4,0x8,0xc};
UCHAR RFVGA_gain_table[]=
{ 0x24, 0x25, 0x26, 0x27, //[0:2:6]
0x28, 0x2c, 0x2d, 0x2e, //[8:2:14]
0x2f, 0x30, 0x31, 0x38, //[16:2:22]
0x39, 0x3a, 0x3b, 0x3c, //[24:2:30]
0x3d, 0x3e, 0x3f }; //[32:2:36]
// UCHAR VGA_gain[] = {0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b};
CHAR VGA_gain[] = {14, 14};
UCHAR BBP_2324gain[] = {0x16, 0x14, 0x12, 0x10, 0xc, 0x8};
UCHAR BBPValue = 0, chain_idx = 0, rf_alc_idx = 0, idx = 0;
UCHAR BBPR30Value, RFB0_R39, RFB0_R42;
#ifdef RT6352_EP_SUPPORT
UCHAR BBP_R1_Value;
UCHAR BBP_R4_Value;
UCHAR BBPR241, BBPR242;
#endif /* RT6352_EP_SUPPORT */
UCHAR count_step;
//RF self Tx DC calibration
//RF_SELF_TXDC_CAL(pAd);
/* backup before MAC RF Interface config */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &saveMacSysCtrl);
RTMP_IO_READ32(pAd, TX_PIN_CFG, &MacOrg1);
RTMP_IO_READ32(pAd, RF_CONTROL0, &MacOrg2);
RTMP_IO_READ32(pAd, RTMP_RF_BYPASS0, &MacOrg3);
RTMP_IO_READ32(pAd, RF_CONTROL3, &MacOrg4);
RTMP_IO_READ32(pAd, RF_BYPASS3, &MacOrg5);
RTMP_IO_READ32(pAd, 0x13b8 , &Mac13b8);
RTMP_IO_READ32(pAd, RF_CONTROL2, &ORIG528);
RTMP_IO_READ32(pAd, RF_BYPASS2, &ORIG52C);
/* MAC Tx */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &MacValue);
MacValue &= (~0x04);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, MacValue);
for (MTxCycle = 0; MTxCycle < 10000; MTxCycle++)
{
RTMP_IO_READ32(pAd, MAC_STATUS_CFG, &MacValue);
if (MacValue & 0x1)
RtmpusecDelay(50);
else
break;
}
/* MAC Rx */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &MacValue);
MacValue &= (~0x08);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, MacValue);
for (MTxCycle = 0; MTxCycle < 10000; MTxCycle++)
{
RTMP_IO_READ32(pAd, MAC_STATUS_CFG, &MacValue);
if (MacValue & 0x2)
RtmpusecDelay(50);
else
break;
}
/*
step 1: manually turn on ADC8, ADC6, PA, Tx, Rx,
and bypass ALC control
*/
/* backup RF registers before config */
for (chain_idx = 0; chain_idx < 2; chain_idx++)
{
RFConfigStore(pAd, RF_Store, chain_idx);
}
/* Backup ADC clock selection */
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R30, &BBPR30Value);
RT635xReadRFRegister(pAd, RF_BANK0, RF_R39, &RFB0_R39);
RT635xReadRFRegister(pAd, RF_BANK0, RF_R42, &RFB0_R42);
/* change to Shielding clock */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R30, 0x1F);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R39, 0x80);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R42, 0x5B);
/* step 7: set BBP tone generator */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
SetBbpToneGenerator(pAd);
for( chain_idx = 0; chain_idx < 2; chain_idx ++)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0x0);
RTMP_IO_WRITE32(pAd, TX_PIN_CFG, 0x0000000f);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00000004);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0, 0x00003306);
RTMP_IO_WRITE32(pAd, 0x13b8 , 0x10);
RtmpusecDelay(1);
if (chain_idx == 0)
{
/* step 2: set RF loopback for ANT0 */
RF_AUX_TX0_LOOPBACK(pAd);
}
else
{
/* step 4: set RF loopback for ANT1 */
RF_AUX_TX1_LOOPBACK(pAd);
}
RtmpusecDelay(1);
if(chain_idx == 0)
{
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00001004);
}
else
{
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00002004);
}
/* step 8: set accumulation length */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x05);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00);
/* step 9: set chain index */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x01);
if(chain_idx == 0)
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00); /* for pair 0 */
else
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x01); /* for pair 1 */
/* step 10: for loop of chain 0 with rf_alc_idx */
VGA_gain[chain_idx] = 18;
for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++)
{
/* step 13: manually set BBP digital power control */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, BBP_2324gain[rf_alc_idx]);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, BBP_2324gain[rf_alc_idx]);
/* step 11: manually set RF ALC code */
RTMP_IO_READ32(pAd, RF_CONTROL3, &MacValue); /* fix RF ALC */
MacValue &= (~0x0000f1f1);
MacValue |= (RF_gain[rf_alc_idx] << 4);
MacValue |= (RF_gain[rf_alc_idx] << 12);
RTMP_IO_WRITE32(pAd, RF_CONTROL3, MacValue);
MacValue = (0x0000f1f1);
RTMP_IO_WRITE32(pAd, RF_BYPASS3, MacValue);
/* step 12: manually set RF VGA gain */
if(rf_alc_idx == 0) {
Write_DC(pAd, chain_idx, 0, 1, 0x21);
for(;VGA_gain[chain_idx] > 0;VGA_gain[chain_idx] = VGA_gain[chain_idx] -2){
RFValue = RFVGA_gain_table[VGA_gain[chain_idx]];
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R04, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R04, RFValue);
Write_DC(pAd, chain_idx, 0, 1, 0x0);
Write_DC(pAd, chain_idx, 0, 0, 0x0);
pwr0 = Do_FFT_Accumulation(pAd, 0xa,0);
Write_DC(pAd, chain_idx, 0, 0, 0x21);
pwr1 = Do_FFT_Accumulation(pAd, 0xa,0);
DBGPRINT(RT_DEBUG_TRACE, ("LOFT AGC %d %d\n",pwr0,pwr1));
if((pwr0 < 7000*7000) && (pwr1 < (7000*7000))){
break;
}
}
/* for alc = 0 calibration, clean the dc value */
Write_DC(pAd, chain_idx, 0, 0, 0x0);
Write_DC(pAd, chain_idx, 0, 1, 0x0);
DBGPRINT(RT_DEBUG_TRACE, ("Used VGA %d %x\n",VGA_gain[chain_idx], RFVGA_gain_table[VGA_gain[chain_idx]]));
if(VGA_gain[chain_idx] < 0)
VGA_gain[chain_idx] = 0;
}
RFValue = RFVGA_gain_table[VGA_gain[chain_idx]];
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R04, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R04, RFValue);
/* step 14, step 15, and step 16: search */
LOFT_Search(pAd, chain_idx, rf_alc_idx, LOFT_DC_Search_Result);
}
}
/* step 19: write back compensate value */
for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++)
{
// DBGPRINT(RT_DEBUG_OFF, ("dc_result[0][%u][0]=%x, dc_result[0][%u][1]=%x\n"
// , rf_alc_idx, LOFT_DC_Search_Result[0][rf_alc_idx][0], rf_alc_idx
// , LOFT_DC_Search_Result[0][rf_alc_idx][1]));
// DBGPRINT(RT_DEBUG_OFF, ("dc_result[1][%u][0]=%x, dc_result[1][%u][1]=%x\n"
// , rf_alc_idx, LOFT_DC_Search_Result[1][rf_alc_idx][0], rf_alc_idx
// , LOFT_DC_Search_Result[1][rf_alc_idx][1]));
for (idx = 0; idx < 4; idx++)
{
// table_dc_i0 = LOFT_DC_Search_Result[0][(idx == 0) ? 0 : (idx+2)][0] - LOFT_DC_Search_Result[0][0][0];
// table_dc_q0 = LOFT_DC_Search_Result[0][(idx == 0) ? 0 : (idx+2)][1] - LOFT_DC_Search_Result[0][0][1];
// table_dc_i1 = LOFT_DC_Search_Result[1][(idx == 0) ? 0 : (idx+2)][0] - LOFT_DC_Search_Result[1][0][0];
// table_dc_q1 = LOFT_DC_Search_Result[1][(idx == 0) ? 0 : (idx+2)][1] - LOFT_DC_Search_Result[1][0][1];
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb0);
BBPValue = (idx<<2) + rf_alc_idx;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
DBGPRINT(RT_DEBUG_TRACE, (" ALC %2x,", BBPValue));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb1);
BBPValue = LOFT_DC_Search_Result[CHAIN_0][rf_alc_idx][I_PATH];
BBPValue = BBPValue & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
DBGPRINT(RT_DEBUG_TRACE, (" I0 %2x,", BBPValue));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb2);
BBPValue = LOFT_DC_Search_Result[CHAIN_0][rf_alc_idx][Q_PATH];
BBPValue = BBPValue & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
DBGPRINT(RT_DEBUG_TRACE, (" Q0 %2x,", BBPValue));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb8);
BBPValue = LOFT_DC_Search_Result[CHAIN_1][rf_alc_idx][I_PATH];
BBPValue = BBPValue & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
DBGPRINT(RT_DEBUG_TRACE, (" I1 %2x,", BBPValue));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb9);
BBPValue = LOFT_DC_Search_Result[CHAIN_1][rf_alc_idx][Q_PATH];
BBPValue = BBPValue & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
DBGPRINT(RT_DEBUG_TRACE, (" Q1 %2x\n", BBPValue));
}
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x04);
/* change BBP Tx to normal state */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x00);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00);
BBPValue = 0x00;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R244, 0x00);
/* BBP soft reset */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x01);
RtmpusecDelay(1); /* wait 1 usec */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x00);
/* recover RF registers */
RFConfigRecover(pAd, RF_Store);
/* recover MAC registers */
RTMP_IO_WRITE32(pAd, TX_PIN_CFG, MacOrg1);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x04);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0, 0x0);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, MacOrg2);
RtmpusecDelay(1);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0, MacOrg3);
RTMP_IO_WRITE32(pAd, RF_CONTROL3, MacOrg4);
RTMP_IO_WRITE32(pAd, RF_BYPASS3, MacOrg5);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, saveMacSysCtrl);
RTMP_IO_WRITE32(pAd, RF_CONTROL2, ORIG528);
RTMP_IO_WRITE32(pAd, RF_BYPASS2, ORIG52C);
RTMP_IO_WRITE32(pAd, 0x13b8 , Mac13b8);
DBGPRINT(RT_DEBUG_ERROR, ("LOFT Calibration Done!\n"));
/*************************************************************************/
/* start I/Q calibration */
/* backup before MAC RF Interface config */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &saveMacSysCtrl);
RTMP_IO_READ32(pAd, TX_PIN_CFG, &MacOrg1);
RTMP_IO_READ32(pAd, RF_CONTROL0, &MacOrg2);
RTMP_IO_READ32(pAd, RTMP_RF_BYPASS0, &MacOrg3);
RTMP_IO_READ32(pAd, RF_CONTROL3, &MacOrg4);
RTMP_IO_READ32(pAd, RF_BYPASS3, &MacOrg5);
#ifdef RT6352_EP_SUPPORT
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R1, &BBP_R1_Value);
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R4, &BBP_R4_Value);
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R241, &BBPR241);
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R242, &BBPR242);
#endif /* RT6352_EP_SUPPORT */
RTMP_IO_READ32(pAd, 0x13b8 , &Mac13b8);
/* MAC Tx */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &MacValue);
MacValue &= (~0x04);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, MacValue);
for (MTxCycle = 0; MTxCycle < 10000; MTxCycle++)
{
RTMP_IO_READ32(pAd, MAC_STATUS_CFG, &MacValue);
if (MacValue & 0x1)
RtmpusecDelay(50);
else
break;
}
/* MAC Rx */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &MacValue);
MacValue &= (~0x08);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, MacValue);
for (MTxCycle = 0; MTxCycle < 10000; MTxCycle++)
{
RTMP_IO_READ32(pAd, MAC_STATUS_CFG, &MacValue);
if (MacValue & 0x2)
RtmpusecDelay(50);
else
break;
}
/*
step 1: manually turn on ADC8, ADC6, PA, Tx, Rx,
and bypass ALC control
*/
#ifdef RT6352_EP_SUPPORT
RTMP_IO_WRITE32(pAd, RF_CONTROL3, 0x00000101);
RTMP_IO_WRITE32(pAd, RF_BYPASS3 , 0x0000f1f1);
#endif /* RT6352_EP_SUPPORT */
/* manually set RF ALC code */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
#ifdef RT6352_EP_SUPPORT
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R4, BBP_R4_Value & (~0x18));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x1);
RtmpusecDelay(1); /* wait 1 usec */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R241, 0x14);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R242, 0x80);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R244, 0x31);
#else
SetBbpToneGenerator(pAd);
#endif /* RT6352_EP_SUPPORT */
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00000004);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0, 0x00003306);
RtmpusecDelay(1);
RTMP_IO_WRITE32(pAd, TX_PIN_CFG , 0x0000000f);
#ifndef RT6352_EP_SUPPORT
RTMP_IO_WRITE32(pAd, RF_CONTROL3, 0x00000000);
RTMP_IO_WRITE32(pAd, RF_BYPASS3 , 0x0000f1f1);
#endif /* RT6352_EP_SUPPORT */
RTMP_IO_WRITE32(pAd, 0x13b8, 0x00000010);
/* backup RF registers before config */
for (chain_idx = 0; chain_idx < 2; chain_idx++)
{
RFConfigStore(pAd, RF_Store, chain_idx);
}
/* step 3: set BBP tone generator */
/* step 6: manually set RF VGA gain */
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R03, 0x3b);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R04, 0x3b);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R03, 0x3b);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R04, 0x3b);
/* step 9: enable Amp/Phase IQ compensation & LOFT compensation */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x03);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x60);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x80);
for (chain_idx = 0; chain_idx < 2; chain_idx ++)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
if (chain_idx == 0)
{
/* step 2: set RF loopback for ANT0 */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x01);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00); /* for pair 0 */
#ifdef RT6352_EP_SUPPORT
// BBP only Tx0
BBPValue = BBP_R1_Value & (~ 0x18);
BBPValue = BBPValue | 0x00;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BBPValue);
#endif /* RT6352_EP_SUPPORT */
RF_AUX_TX0_LOOPBACK(pAd);
}
else
{
/* step 2: set RF loopback for ANT1 */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x01);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x01); /* for pair 1 */
#ifdef RT6352_EP_SUPPORT
// BBP only Tx1
BBPValue = BBP_R1_Value & (~ 0x18);
BBPValue = BBPValue | 0x08;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BBPValue);
#endif /* RT6352_EP_SUPPORT */
RF_AUX_TX1_LOOPBACK(pAd);
}
if(chain_idx == 0)
{
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00001004);
}
else
{
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x00002004);
}
/* step 8: set calibration length */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x05);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x04);
/* step 10: search for chain 0 */
// clear gain value
BBPValue = (chain_idx == 0) ? 0x28 : 0x46;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x0);
#ifdef RT6352_EP_SUPPORT
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x6);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x6);
#else
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x1F);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x1F);
#endif /* RT6352_EP_SUPPORT */
#ifdef RT6352_EP_SUPPORT
count_step = 1;
#else
count_step = 2;
#endif /* RT6352_EP_SUPPORT */
for(;VGA_gain[chain_idx] < 19; VGA_gain[chain_idx]=(VGA_gain[chain_idx]+ count_step)){
RFValue = RFVGA_gain_table[VGA_gain[chain_idx]];
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK5, RF_R04, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R03, RFValue);
RT635xWriteRFRegister(pAd, RF_BANK7, RF_R04, RFValue);
BBPValue = (chain_idx == 0) ? 0x29 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x0);
pwr0 = Do_FFT_Accumulation(pAd,0x14,0);
#ifdef RT6352_EP_SUPPORT
pwr0_idx10 = Read_FFT_Accumulation(pAd,0xa);
#endif /* RT6352_EP_SUPPORT */
BBPValue = (chain_idx == 0) ? 0x29 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x21);
pwr1 = Do_FFT_Accumulation(pAd,0x14,0);
#ifdef RT6352_EP_SUPPORT
pwr1_idx10 = Read_FFT_Accumulation(pAd,0xa);
#endif /* RT6352_EP_SUPPORT */
DBGPRINT(RT_DEBUG_TRACE, ("IQ AGC %d %d\n",pwr0,pwr1));
#ifdef RT6352_EP_SUPPORT
DBGPRINT(RT_DEBUG_TRACE, ("IQ AGC IDX 10 %d %d\n",pwr0_idx10,pwr1_idx10));
if((pwr0_idx10 > 7000*7000) ||(pwr1_idx10 > 7000*7000)){
if(VGA_gain[chain_idx]!=0)
VGA_gain[chain_idx] = VGA_gain[chain_idx]-1;
break;
}
#endif /* RT6352_EP_SUPPORT */
if((pwr0 >2500*2500) || (pwr1 > 2500*2500)){
break;
}
}
if (VGA_gain[chain_idx] > 18)
VGA_gain[chain_idx] = 18;
DBGPRINT(RT_DEBUG_TRACE, ("Used VGA %d %x\n",VGA_gain[chain_idx], RFVGA_gain_table[VGA_gain[chain_idx]]));
BBPValue = (chain_idx == 0) ? 0x29 : 0x47;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x0);
IQ_Search(pAd, chain_idx, gain_error_result, phase_error_result);
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R23, 0x0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R24, 0x0);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x04);
/* step 19: write back compensate value */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x28);
BBPValue = gain_error_result[CHAIN_0] & 0x0f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x29);
BBPValue = phase_error_result[CHAIN_0] & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x46);
BBPValue = gain_error_result[CHAIN_1] & 0x0f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x47);
BBPValue = phase_error_result[CHAIN_1] & 0x3f;
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, BBPValue);
#ifdef RT6352_EP_SUPPORT
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BBP_R1_Value);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R241, BBPR241);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R242, BBPR242);
#endif /* RT6352_EP_SUPPORT */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R244, 0x00);
/* change BBP Tx to normal state */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0x00);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R158, 0xb0);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R159, 0x00);
/* restore ADC clock selection */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R30, BBPR30Value);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R39, RFB0_R39);
RT635xWriteRFRegister(pAd, RF_BANK0, RF_R42, RFB0_R42);
#ifdef RT6352_EP_SUPPORT
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R4, BBP_R4_Value);
#endif /* RT6352_EP_SUPPORT */
/* BBP soft reset */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x01);
RtmpusecDelay(1); /* wait 1 usec */
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R21, 0x00);
/* recover RF registers */
RFConfigRecover(pAd, RF_Store);
/* recover MAC registers */
RTMP_IO_WRITE32(pAd, TX_PIN_CFG, MacOrg1);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, 0x0);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0 , 0x0);
RTMP_IO_WRITE32(pAd, RF_CONTROL0, MacOrg2);
RtmpusecDelay(1);
RTMP_IO_WRITE32(pAd, RTMP_RF_BYPASS0, MacOrg3);
RTMP_IO_WRITE32(pAd, RF_CONTROL3, MacOrg4);
RTMP_IO_WRITE32(pAd, RF_BYPASS3, MacOrg5);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, saveMacSysCtrl);
RTMP_IO_WRITE32(pAd, 0x13b8 , Mac13b8);
DBGPRINT(RT_DEBUG_ERROR, ("TX IQ Calibration Done!\n"));
return;
}
#endif /* RT6352 */
| gpl-2.0 |
laborautonomo/rivendell | lib/rdmarker_edit.h | 1284 | // rdmarkeredit.h
//
// A flashing button widget.
//
// (C) Copyright 2002 Fred Gleason <[email protected]>
//
// $Id: rdmarker_edit.h,v 1.7 2010/07/29 19:32:33 cvs Exp $
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
//
//
#ifndef RDMARKEREDIT_H
#define RDMARKEREDIT_H
#include <qwidget.h>
#include <qlineedit.h>
#include <qpixmap.h>
#include <qcolor.h>
class RDMarkerEdit : public QLineEdit
{
Q_OBJECT
public:
RDMarkerEdit(QWidget *parent,const char *name);
RDMarkerEdit(const QString &contents,QWidget *parent,const char *name);
signals:
void escapePressed();
protected:
void keyPressEvent(QKeyEvent *e);
};
#endif // RDMARKEREDIT_H
| gpl-2.0 |
pcengines/coreboot | src/mainboard/amd/south_station/OemCustomize.c | 3351 | /* SPDX-License-Identifier: GPL-2.0-only */
#include <AGESA.h>
#include <northbridge/amd/agesa/state_machine.h>
#include <PlatformMemoryConfiguration.h>
static const PCIe_PORT_DESCRIPTOR PortList[] = {
// Initialize Port descriptor (PCIe port, Lanes 4, PCI Device Number 4, ...)
{
0,
PCIE_ENGINE_DATA_INITIALIZER(PciePortEngine, 4, 4),
PCIE_PORT_DATA_INITIALIZER(PortEnabled, ChannelTypeExt6db, 4,
HotplugDisabled,
PcieGen2,
PcieGen2,
AspmL0sL1, 4)
},
// Initialize Port descriptor (PCIe port, Lanes 5, PCI Device Number 5, ...)
{
0,
PCIE_ENGINE_DATA_INITIALIZER(PciePortEngine, 5, 5),
PCIE_PORT_DATA_INITIALIZER(PortEnabled, ChannelTypeExt6db, 5,
HotplugDisabled,
PcieGen2,
PcieGen2,
AspmL0sL1, 5)
},
// Initialize Port descriptor (PCIe port, Lanes 6, PCI Device Number 6, ...)
{
0,
PCIE_ENGINE_DATA_INITIALIZER(PciePortEngine, 6, 6),
PCIE_PORT_DATA_INITIALIZER(PortEnabled, ChannelTypeExt6db, 6,
HotplugDisabled,
PcieGen2,
PcieGen2,
AspmL0sL1, 6)
},
// Initialize Port descriptor (PCIe port, Lanes 7, PCI Device Number 7, ...)
{
0,
PCIE_ENGINE_DATA_INITIALIZER(PciePortEngine, 7, 7),
PCIE_PORT_DATA_INITIALIZER(PortEnabled, ChannelTypeExt6db, 7,
HotplugDisabled,
PcieGen2,
PcieGen2,
AspmL0sL1, 7)
},
// Initialize Port descriptor (PCIe port, Lanes 0:3, PCI Device Number 8, ...)
{
DESCRIPTOR_TERMINATE_LIST,
PCIE_ENGINE_DATA_INITIALIZER(PciePortEngine, 0, 3),
PCIE_PORT_DATA_INITIALIZER(PortEnabled, ChannelTypeExt6db, 8,
HotplugDisabled,
PcieGen2,
PcieGen2,
AspmL0sL1, 0)
}
};
static const PCIe_DDI_DESCRIPTOR DdiList[] = {
/* Initialize Ddi descriptor (DDI interface Lanes 12:15, DdB, ...) DP1 HDMI */
{
0,
PCIE_ENGINE_DATA_INITIALIZER(PcieDdiEngine, 12, 15),
PCIE_DDI_DATA_INITIALIZER(ConnectorTypeHDMI, Aux2, Hdp2)
},
/* Initialize Ddi descriptor (DDI interface Lanes 8:11, DdA, ...) DP0 VGA */
{
DESCRIPTOR_TERMINATE_LIST,
PCIE_ENGINE_DATA_INITIALIZER(PcieDdiEngine, 8, 11),
PCIE_DDI_DATA_INITIALIZER(ConnectorTypeCrt, Aux1, Hdp1)
}
};
static const PCIe_COMPLEX_DESCRIPTOR PcieComplex = {
.Flags = DESCRIPTOR_TERMINATE_LIST,
.SocketId = 0,
.PciePortList = PortList,
.DdiLinkList = DdiList,
};
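/* Hand the PCIe port/DDI complex above to AGESA before early platform init. */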
void board_BeforeInitEarly(struct sysinfo *cb, AMD_EARLY_PARAMS *InitEarly)
{
InitEarly->GnbConfig.PcieComplexList = &PcieComplex;
InitEarly->GnbConfig.PsppPolicy = 0;
}
/*----------------------------------------------------------------------------------------
* CUSTOMER OVERRIDES MEMORY TABLE
*----------------------------------------------------------------------------------------
*/
/*
* Platform Specific Overriding Table allows IBV/OEM to pass in platform information to AGESA
* (e.g. MemClk routing, the number of DIMM slots per channel,...). If PlatformSpecificTable
* is populated, AGESA will base its settings on the data from the table. Otherwise, it will
* use its default conservative settings.
*/
static CONST PSO_ENTRY ROMDATA PlatformMemoryTable[] = {
NUMBER_OF_DIMMS_SUPPORTED(ANY_SOCKET, ANY_CHANNEL, 2),
NUMBER_OF_CHANNELS_SUPPORTED(ANY_SOCKET, 1),
PSO_END
};
void board_BeforeInitPost(struct sysinfo *cb, AMD_POST_PARAMS *InitPost)
{
InitPost->MemConfig.PlatformMemoryConfiguration = (PSO_ENTRY *)PlatformMemoryTable;
}
| gpl-2.0 |
HowWen-CPE/cpe | target/linux/atheros/wp838/wp838_QCA10.1.436/files/drivers/net/ethernet/s17_ssdk/src/fal_uk/fal_port_ctrl.c | 9795 | /*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
* All rights reserved.
* Qualcomm Atheros Confidential and Proprietary.
*
*/
#include "sw.h"
#include "sw_ioctl.h"
#include "fal_port_ctrl.h"
#include "fal_uk_if.h"
sw_error_t
fal_port_duplex_set(a_uint32_t dev_id, fal_port_t port_id,
fal_port_duplex_t duplex)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_DUPLEX_SET, dev_id, port_id,
(a_uint32_t) duplex);
return rv;
}
sw_error_t
fal_port_duplex_get(a_uint32_t dev_id, fal_port_t port_id,
fal_port_duplex_t * pduplex)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_DUPLEX_GET, dev_id, port_id,
(a_uint32_t) pduplex);
return rv;
}
sw_error_t
fal_port_speed_set(a_uint32_t dev_id, fal_port_t port_id,
fal_port_speed_t speed)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_SPEED_SET, dev_id, port_id,
(a_uint32_t) speed);
return rv;
}
sw_error_t
fal_port_speed_get(a_uint32_t dev_id, fal_port_t port_id,
fal_port_speed_t * pspeed)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_SPEED_GET, dev_id, port_id,
(a_uint32_t) pspeed);
return rv;
}
sw_error_t
fal_port_autoneg_status_get(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t * status)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_AN_GET, dev_id, port_id, (a_uint32_t) status);
return rv;
}
sw_error_t
fal_port_autoneg_enable(a_uint32_t dev_id, fal_port_t port_id)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_AN_ENABLE, dev_id, port_id);
return rv;
}
sw_error_t
fal_port_autoneg_restart(a_uint32_t dev_id, fal_port_t port_id)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_AN_RESTART, dev_id, port_id);
return rv;
}
sw_error_t
fal_port_autoneg_adv_set(a_uint32_t dev_id, fal_port_t port_id,
a_uint32_t autoadv)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_AN_ADV_SET, dev_id, port_id, autoadv);
return rv;
}
sw_error_t
fal_port_autoneg_adv_get(a_uint32_t dev_id, fal_port_t port_id,
a_uint32_t * autoadv)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_AN_ADV_GET, dev_id, port_id,
(a_uint32_t) autoadv);
return rv;
}
sw_error_t
fal_port_hdr_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_HDR_SET, dev_id, port_id, (a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_hdr_status_get(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_HDR_GET, dev_id, port_id, (a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_flowctrl_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_FLOWCTRL_SET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_flowctrl_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_FLOWCTRL_GET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_flowctrl_forcemode_set(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_FLOWCTRL_MODE_SET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_flowctrl_forcemode_get(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_FLOWCTRL_MODE_GET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_powersave_set(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_POWERSAVE_SET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_powersave_get(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_POWERSAVE_GET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_hibernate_set(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_HIBERNATE_SET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_hibernate_get(a_uint32_t dev_id, fal_port_t port_id,
a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_HIBERNATE_GET, dev_id, port_id,
(a_uint32_t) enable);
return rv;
}
sw_error_t
fal_port_cdt(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t mdi_pair,
a_uint32_t *cable_status, a_uint32_t *cable_len)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_CDT, dev_id, port_id, mdi_pair,
(a_uint32_t) cable_status, (a_uint32_t)cable_len);
return rv;
}
sw_error_t
fal_port_rxhdr_mode_set(a_uint32_t dev_id, fal_port_t port_id,
fal_port_header_mode_t mode)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_RXHDR_SET, dev_id, port_id, (a_uint32_t)mode);
return rv;
}
sw_error_t
fal_port_rxhdr_mode_get(a_uint32_t dev_id, fal_port_t port_id,
fal_port_header_mode_t * mode)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_RXHDR_GET, dev_id, port_id, (a_uint32_t)mode);
return rv;
}
sw_error_t
fal_port_txhdr_mode_set(a_uint32_t dev_id, fal_port_t port_id,
fal_port_header_mode_t mode)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_TXHDR_SET, dev_id, port_id, (a_uint32_t)mode);
return rv;
}
sw_error_t
fal_port_txhdr_mode_get(a_uint32_t dev_id, fal_port_t port_id,
fal_port_header_mode_t * mode)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_TXHDR_GET, dev_id, port_id, (a_uint32_t)mode);
return rv;
}
sw_error_t
fal_header_type_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t type)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_HEADER_TYPE_SET, dev_id, (a_uint32_t)enable, type);
return rv;
}
sw_error_t
fal_header_type_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * type)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_HEADER_TYPE_GET, dev_id, (a_uint32_t)enable, (a_uint32_t)type);
return rv;
}
sw_error_t
fal_port_txmac_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_TXMAC_STATUS_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_txmac_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_TXMAC_STATUS_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_rxmac_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_RXMAC_STATUS_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_rxmac_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_RXMAC_STATUS_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_txfc_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_TXFC_STATUS_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_txfc_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_TXFC_STATUS_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_rxfc_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_RXFC_STATUS_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_rxfc_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_RXFC_STATUS_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_bp_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_BP_STATUS_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_bp_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_BP_STATUS_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_link_forcemode_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_LINK_MODE_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_link_forcemode_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_LINK_MODE_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_link_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * status)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_LINK_STATUS_GET, dev_id, port_id, (a_uint32_t)status);
return rv;
}
sw_error_t
fal_port_mac_loopback_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_MAC_LOOPBACK_SET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
sw_error_t
fal_port_mac_loopback_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable)
{
sw_error_t rv;
rv = sw_uk_exec(SW_API_PT_MAC_LOOPBACK_GET, dev_id, port_id, (a_uint32_t)enable);
return rv;
}
| gpl-2.0 |
elijah513/ice | cpp/test/Ice/background/Acceptor.h | 1166 | // **********************************************************************
//
// Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
//
// This copy of Ice is licensed to you under the terms described in the
// ICE_LICENSE file included in this distribution.
//
// **********************************************************************
#ifndef TEST_ACCEPTOR_H
#define TEST_ACCEPTOR_H
#include <Ice/Acceptor.h>
#include <EndpointI.h>
class Acceptor : public IceInternal::Acceptor
{
public:
virtual IceInternal::NativeInfoPtr getNativeInfo();
virtual void close();
virtual IceInternal::EndpointIPtr listen();
#ifdef ICE_USE_IOCP
virtual void startAccept();
virtual void finishAccept();
#endif
virtual IceInternal::TransceiverPtr accept();
virtual std::string protocol() const;
virtual std::string toString() const;
virtual std::string toDetailedString() const;
IceInternal::AcceptorPtr delegate() const { return _acceptor; }
private:
Acceptor(const EndpointIPtr&, const IceInternal::AcceptorPtr&);
friend class EndpointI;
EndpointIPtr _endpoint;
const IceInternal::AcceptorPtr _acceptor;
};
#endif
| gpl-2.0 |
coreentin/android_kernel_nvidia_s8515 | drivers/mmc/host/sdhci-tegra.c | 73757 | /*
* Copyright (C) 2010 Google, Inc.
*
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/mmc.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <asm/gpio.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <mach/gpio-tegra.h>
#include <mach/sdhci.h>
#include <mach/io_dpd.h>
#include <mach/pinmux.h>
#include <mach/clk.h>
#include "sdhci-pltfm.h"
#define SDHCI_VNDR_CLK_CTRL 0x100
#define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
#define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
#define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
#define SDHCI_VNDR_CLK_CNTL_INPUT_IO_CLK 0x2
#define SDHCI_VNDR_CLK_CTRL_BASE_CLK_FREQ_SHIFT 8
#define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
#define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
#define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
#define SDHCI_VNDR_MISC_CTRL 0x120
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
#define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
#define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
#define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
#define SDMMC_SDMEMCOMPPADCTRL 0x1E0
#define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
#define SDMMC_AUTO_CAL_CONFIG 0x1E4
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET 0x70
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET 0x62
#define SDMMC_AUTO_CAL_STATUS 0x1EC
#define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
#define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
#define PULLUP_ADJUSTMENT_OFFSET 20
#define SDHOST_1V8_OCR_MASK 0x8
#define SDHOST_HIGH_VOLT_MIN 2700000
#define SDHOST_HIGH_VOLT_MAX 3600000
#define SDHOST_HIGH_VOLT_2V8 2800000
#define SDHOST_LOW_VOLT_MIN 1800000
#define SDHOST_LOW_VOLT_MAX 1800000
#define TEGRA_SDHOST_MIN_FREQ 50000000
#define TEGRA2_SDHOST_STD_FREQ 50000000
#define TEGRA3_SDHOST_STD_FREQ 104000000
#define MAX_DIVISOR_VALUE 128
#define DEFAULT_SDHOST_FREQ 50000000
#define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
#define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
#define MAX_TAP_VALUES 255
#define TUNING_FREQ_COUNT 3
#define TUNING_VOLTAGES_COUNT 2
#define TUNING_RETRIES 1
struct sdhci_host *sdhci_host_for_sdio;
static unsigned int uhs_max_freq_MHz[] = {
[MMC_TIMING_UHS_SDR50] = 100,
[MMC_TIMING_UHS_SDR104] = 208,
[MMC_TIMING_MMC_HS200] = 200,
};
#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock);
#endif
static unsigned int tegra_sdhost_min_freq;
static unsigned int tegra_sdhost_std_freq;
struct tegra_sdhci_hw_ops {
/* Set the internal clk and card clk.*/
void (*set_card_clock)(struct sdhci_host *sdhci, unsigned int clock);
};
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
static struct tegra_sdhci_hw_ops tegra_2x_sdhci_ops = {
};
#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
static struct tegra_sdhci_hw_ops tegra_3x_sdhci_ops = {
.set_card_clock = tegra_3x_sdhci_set_card_clock,
};
#else
static struct tegra_sdhci_hw_ops tegra_11x_sdhci_ops = {
};
#endif
/* Erratum: Version register is invalid in HW */
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
/* Erratum: Enable block gap interrupt detection */
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
/* Do not enable auto calibration if the platform doesn't support it */
#define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
/* Set Calibration Offsets */
#define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
/* Set Drive Strengths */
#define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
/* Enable PADPIPE CLKEN */
#define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
/* DISABLE SPI_MODE CLKEN */
#define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
/* Set tap delay */
#define NVQUIRK_SET_TAP_DELAY BIT(7)
/* Set trim delay */
#define NVQUIRK_SET_TRIM_DELAY BIT(8)
/* Enable SDHOST v3.0 support */
#define NVQUIRK_ENABLE_SD_3_0 BIT(9)
/* Enable SDR50 mode */
#define NVQUIRK_ENABLE_SDR50 BIT(10)
/* Enable SDR104 mode */
#define NVQUIRK_ENABLE_SDR104 BIT(11)
/* Enable DDR50 mode */
#define NVQUIRK_ENABLE_DDR50 BIT(12)
/* Enable Frequency Tuning for SDR50 mode */
#define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
/* Enable Infinite Erase Timeout*/
#define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(14)
/* Disable AUTO CMD23 */
#define NVQUIRK_DISABLE_AUTO_CMD23 BIT(15)
/* ENABLE FEEDBACK IO CLOCK */
#define NVQUIRK_EN_FEEDBACK_CLK BIT(16)
struct sdhci_tegra_soc_data {
struct sdhci_pltfm_data *pdata;
u32 nvquirks;
};
struct sdhci_tegra_sd_stats {
unsigned int data_crc_count;
unsigned int cmd_crc_count;
unsigned int data_to_count;
unsigned int cmd_to_count;
};
enum tegra_tuning_freq {
TUNING_LOW_FREQ,
TUNING_HIGH_FREQ,
TUNING_HIGH_FREQ_HV,
};
struct freq_tuning_params {
unsigned int freq_hz;
unsigned int nr_voltages;
unsigned int voltages[TUNING_VOLTAGES_COUNT];
};
static struct freq_tuning_params tuning_params[TUNING_FREQ_COUNT] = {
[TUNING_LOW_FREQ] = {
.freq_hz = 82000000,
.nr_voltages = 1,
.voltages = {1250},
},
[TUNING_HIGH_FREQ] = {
.freq_hz = 156000000,
.nr_voltages = 2,
.voltages = {1250, 1100},
},
[TUNING_HIGH_FREQ_HV] = {
.freq_hz = 156000000,
.nr_voltages = 2,
.voltages = {1390, 1250},
},
};
struct tap_window_data {
unsigned int partial_win;
unsigned int full_win_begin;
unsigned int full_win_end;
unsigned int tuning_ui;
unsigned int sampling_point;
bool abandon_partial_win;
bool abandon_full_win;
};
struct tegra_tuning_data {
unsigned int best_tap_value;
unsigned int best_hv_tap_value;
bool select_partial_win;
bool nominal_vcore_tuning_done;
bool overide_vcore_tuning_done;
bool one_shot_tuning;
struct tap_window_data *tap_data[TUNING_VOLTAGES_COUNT];
};
struct sdhci_tegra {
const struct tegra_sdhci_platform_data *plat;
const struct sdhci_tegra_soc_data *soc_data;
bool clk_enabled;
struct regulator *vdd_io_reg;
struct regulator *vdd_slot_reg;
struct regulator *vcore_reg;
/* Pointer to the chip specific HW ops */
struct tegra_sdhci_hw_ops *hw_ops;
/* Host controller instance */
unsigned int instance;
/* vddio_min */
unsigned int vddio_min_uv;
/* vddio_max */
unsigned int vddio_max_uv;
/* max clk supported by the platform */
unsigned int max_clk_limit;
/* max ddr clk supported by the platform */
unsigned int ddr_clk_limit;
/* SD Hot Plug in Suspend State */
unsigned int sd_detect_in_suspend;
struct tegra_io_dpd *dpd;
bool card_present;
bool is_rail_enabled;
struct clk *emc_clk;
bool emc_clk_enabled;
unsigned int emc_max_clk;
struct sdhci_tegra_sd_stats *sd_stat_head;
unsigned int nominal_vcore_mv;
unsigned int min_vcore_override_mv;
/* Tuning related structures and variables */
/* Tuning opcode to be used */
unsigned int tuning_opcode;
/* Tuning packet size */
unsigned int tuning_bsize;
/* Tuning status */
unsigned int tuning_status;
unsigned int tuning_turbo_enable;
bool is_hv_tuning_done;
#define TUNING_STATUS_DONE 1
#define TUNING_STATUS_RETUNE 2
/* Freq tuning information for each sampling clock freq */
struct tegra_tuning_data tuning_data;
bool set_tuning_override;
bool is_parent_pllc;
struct notifier_block reboot_notify;
};
static struct clk *pll_c;
static struct clk *pll_p;
static unsigned long pll_c_rate;
static unsigned long pll_p_rate;
static int show_error_stats_dump(struct seq_file *s, void *data)
{
struct sdhci_host *host = s->private;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct sdhci_tegra_sd_stats *head;
seq_printf(s, "ErrorStatistics:\n");
seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
head = tegra_host->sd_stat_head;
if (head != NULL)
seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
head->cmd_crc_count, head->data_to_count,
head->cmd_to_count);
return 0;
}
static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
{
return single_open(file, show_error_stats_dump, inode->i_private);
}
static const struct file_operations sdhci_host_fops = {
.open = sdhci_error_stats_dump,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
{
#ifndef CONFIG_ARCH_TEGRA_11x_SOC
u32 val;
if (unlikely(reg == SDHCI_PRESENT_STATE)) {
/* Use wp_gpio here instead? */
val = readl(host->ioaddr + reg);
return val | SDHCI_WRITE_PROTECT;
}
#endif
return readl(host->ioaddr + reg);
}
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
(reg == SDHCI_HOST_VERSION))) {
return SDHCI_SPEC_200;
}
#endif
return readw(host->ioaddr + reg);
}
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
#endif
/* Seems like we're getting spurious timeout and crc errors, so
* disable signalling of them. In case of real errors software
* timers should take care of eventually detecting them.
*/
if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
writel(val, host->ioaddr + reg);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
(reg == SDHCI_INT_ENABLE))) {
u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
if (val & SDHCI_INT_CARD_INT)
gap_ctrl |= 0x8;
else
gap_ctrl &= ~0x8;
writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
}
#endif
}
static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
return tegra_host->card_present;
}
#ifndef CONFIG_ARCH_TEGRA_11x_SOC
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
if (!gpio_is_valid(plat->wp_gpio))
return -1;
return gpio_get_value(plat->wp_gpio);
}
#endif
static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
u16 clk, ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
/* For HS200 we need to set UHS_MODE_SEL to SDR104.
* It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
*/
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
switch (uhs) {
case MMC_TIMING_UHS_SDR12:
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
break;
case MMC_TIMING_UHS_SDR25:
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
break;
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
break;
case MMC_TIMING_UHS_DDR50:
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
break;
}
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
if (uhs == MMC_TIMING_UHS_DDR50) {
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
clk |= 1 << SDHCI_DIVIDER_SHIFT;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
return 0;
}
static void tegra_sdhci_reset_exit(struct sdhci_host *sdhci, u8 mask)
{
u16 misc_ctrl;
u32 vendor_ctrl;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
if (mask & SDHCI_RESET_ALL) {
if (tegra_host->sd_stat_head != NULL) {
tegra_host->sd_stat_head->data_crc_count = 0;
tegra_host->sd_stat_head->cmd_crc_count = 0;
tegra_host->sd_stat_head->data_to_count = 0;
tegra_host->sd_stat_head->cmd_to_count = 0;
}
vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
vendor_ctrl |=
SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
}
if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
vendor_ctrl &=
~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
}
if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
vendor_ctrl &=
~SDHCI_VNDR_CLK_CNTL_INPUT_IO_CLK;
}
if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
if ((tegra_host->tuning_status == TUNING_STATUS_DONE) &&
(sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
vendor_ctrl &= ~(0xFF <<
SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
vendor_ctrl |=
(tegra_host->tuning_data.best_tap_value
<< SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
} else {
if (plat->tap_delay) {
vendor_ctrl &= ~(0xFF <<
SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
vendor_ctrl |= (plat->tap_delay <<
SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
}
}
}
if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
if (plat->trim_delay) {
vendor_ctrl &= ~(0x1F <<
SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
vendor_ctrl |= (plat->trim_delay <<
SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
}
}
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
misc_ctrl = sdhci_readw(sdhci, SDHCI_VNDR_MISC_CTRL);
if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
misc_ctrl |=
SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
}
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
misc_ctrl |=
SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
}
/* Enable DDR mode support only for SDMMC4 */
if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
if (tegra_host->instance == 3) {
misc_ctrl |=
SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
}
}
if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
misc_ctrl |=
SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
}
sdhci_writew(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
/* Mask Auto CMD23 if CMD23 is enabled */
if ((sdhci->mmc->caps & MMC_CAP_CMD23) &&
(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23))
sdhci->flags &= ~SDHCI_AUTO_CMD23;
/* Mask the support for any UHS modes if specified */
if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
sdhci->mmc->caps &= ~MMC_CAP_UHS_SDR104;
if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
sdhci->mmc->caps &= ~MMC_CAP_UHS_DDR50;
if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
sdhci->mmc->caps &= ~MMC_CAP_UHS_SDR50;
if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
sdhci->mmc->caps &= ~MMC_CAP_UHS_SDR25;
if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
sdhci->mmc->caps &= ~MMC_CAP_UHS_SDR12;
if (plat->uhs_mask & MMC_MASK_HS200)
sdhci->mmc->caps2 &= ~MMC_CAP2_HS200;
}
}
static void sdhci_status_notify_cb(int card_present, void *dev_id)
{
struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
struct tegra_sdhci_platform_data *plat;
unsigned int status, oldstat;
pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
card_present);
plat = pdev->dev.platform_data;
if (!plat->mmc_data.status) {
if (card_present == 1) {
sdhci->mmc->rescan_disable = 0;
mmc_detect_change(sdhci->mmc, 0);
} else if (card_present == 0) {
sdhci->mmc->detect_change = 0;
sdhci->mmc->rescan_disable = 1;
}
return;
}
status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
oldstat = plat->mmc_data.card_present;
plat->mmc_data.card_present = status;
if (status ^ oldstat) {
pr_debug("%s: Slot status change detected (%d -> %d)\n",
mmc_hostname(sdhci->mmc), oldstat, status);
if (status && !plat->mmc_data.built_in)
mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
else
mmc_detect_change(sdhci->mmc, 0);
}
}
static irqreturn_t carddetect_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
struct tegra_sdhci_platform_data *plat;
plat = pdev->dev.platform_data;
tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0);
if (tegra_host->card_present) {
if (!tegra_host->is_rail_enabled) {
if (tegra_host->vdd_slot_reg)
regulator_enable(tegra_host->vdd_slot_reg);
if (tegra_host->vdd_io_reg)
regulator_enable(tegra_host->vdd_io_reg);
tegra_host->is_rail_enabled = 1;
}
} else {
if (tegra_host->is_rail_enabled) {
if (tegra_host->vdd_io_reg)
regulator_disable(tegra_host->vdd_io_reg);
if (tegra_host->vdd_slot_reg)
regulator_disable(tegra_host->vdd_slot_reg);
tegra_host->is_rail_enabled = 0;
}
/*
* Set retune request as tuning should be done next time
* a card is inserted.
*/
tegra_host->tuning_status = TUNING_STATUS_RETUNE;
}
tasklet_schedule(&sdhost->card_tasklet);
return IRQ_HANDLED;
};
static int tegra_sdhci_8bit(struct sdhci_host *sdhci, int bus_width)
{
struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
const struct tegra_sdhci_platform_data *plat;
u32 ctrl;
plat = pdev->dev.platform_data;
ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
} else {
ctrl &= ~SDHCI_CTRL_8BITBUS;
if (bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
}
sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
return 0;
}
/*
* Calculation of nearest clock frequency for desired rate:
* Get the divisor value, div = p / d_rate
* 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5 and
* nearest_rate, i.e. result = p / (div + 0.5) = (p << 1)/((div << 1) + 1).
* 2. If not, result = p / div
* As the nearest clk freq should be <= to desired_rate,
* 3. If result > desired_rate then increment the div by 0.5
* and do, (p << 1)/((div << 1) + 1)
* 4. Else return result
 * Here, if conditions 1 & 3 are both satisfied, the index variable is used
 * to keep track of the div value.
*/
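/*
 * Worked example with illustrative numbers (not taken from any particular
 * platform data): for pll_rate = 408 MHz and desired_rate = 82 MHz,
 * div = 408 / 82 = 4 and the remainder (80 MHz) is >= desired_rate / 2
 * (41 MHz), so result = (408 << 1) / ((4 << 1) + 1) = 816 / 9 = ~90.6 MHz.
 * That is still above the desired 82 MHz, so the divisor is bumped by
 * another 0.5: result = 816 / 10 = 81.6 MHz, the nearest achievable rate
 * that does not exceed the request.
 */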
static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
unsigned long desired_rate)
{
unsigned long result;
int div;
int index = 1;
div = pll_rate / desired_rate;
if (div > MAX_DIVISOR_VALUE) {
div = MAX_DIVISOR_VALUE;
result = pll_rate / div;
} else {
if ((pll_rate % desired_rate) >= (desired_rate / 2))
result = (pll_rate << 1) / ((div << 1) + index++);
else
result = pll_rate / div;
if (desired_rate < result) {
/*
* Trying to get lower clock freq than desired clock,
* by increasing the divisor value by 0.5
*/
result = (pll_rate << 1) / ((div << 1) + index);
}
}
return result;
}
static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
unsigned long desired_rate)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct clk *parent_clk;
unsigned long pll_c_freq;
unsigned long pll_p_freq;
int rc;
pll_c_freq = get_nearest_clock_freq(pll_c_rate, desired_rate);
pll_p_freq = get_nearest_clock_freq(pll_p_rate, desired_rate);
if (pll_c_freq > pll_p_freq) {
if (!tegra_host->is_parent_pllc) {
parent_clk = pll_c;
tegra_host->is_parent_pllc = true;
clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
} else
return;
} else if (tegra_host->is_parent_pllc) {
parent_clk = pll_p;
tegra_host->is_parent_pllc = false;
} else
return;
rc = clk_set_parent(pltfm_host->clk, parent_clk);
if (rc)
pr_err("%s: failed to set pll parent clock %d\n",
mmc_hostname(host->mmc), rc);
}
static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
unsigned int clk_rate;
unsigned int emc_clk;
if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
/*
* In ddr mode, tegra sdmmc controller clock frequency
* should be double the card clock frequency.
*/
if (tegra_host->ddr_clk_limit) {
clk_rate = tegra_host->ddr_clk_limit * 2;
if (tegra_host->emc_clk) {
emc_clk = clk_get_rate(tegra_host->emc_clk);
if (emc_clk == tegra_host->emc_max_clk)
clk_rate = clock * 2;
}
} else {
clk_rate = clock * 2;
}
} else if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) {
/*
* In SDR50 mode, run the sdmmc controller at freq greater than
* 104MHz to ensure the core voltage is at 1.2V.
* If the core voltage is below 1.2V, CRC errors would occur
* during data transfers.
*/
clk_rate = clock * 2;
} else {
if (clock <= tegra_sdhost_min_freq)
clk_rate = tegra_sdhost_min_freq;
else if (clock <= tegra_sdhost_std_freq)
clk_rate = tegra_sdhost_std_freq;
else
clk_rate = clock;
}
if (tegra_host->max_clk_limit &&
(clk_rate > tegra_host->max_clk_limit))
clk_rate = tegra_host->max_clk_limit;
tegra_sdhci_clock_set_parent(sdhci, clk_rate);
clk_set_rate(pltfm_host->clk, clk_rate);
sdhci->max_clk = clk_get_rate(pltfm_host->clk);
#ifdef CONFIG_TEGRA_FPGA_PLATFORM
/* FPGA supports 26MHz of clock for SDMMC. */
sdhci->max_clk = 26000000;
#endif
}
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock)
{
int div;
u16 clk;
unsigned long timeout;
u8 ctrl;
if (clock && clock == sdhci->clock)
return;
/*
* Disable the card clock before disabling the internal
* clock to avoid abnormal clock waveforms.
*/
clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
clk &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
sdhci_writew(sdhci, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
goto out;
if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
div = 1;
goto set_clk;
}
if (sdhci->version >= SDHCI_SPEC_300) {
/* Version 3.00 divisors must be a multiple of 2. */
if (sdhci->max_clk <= clock) {
div = 1;
} else {
for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
if ((sdhci->max_clk / div) <= clock)
break;
}
}
} else {
/* Version 2.00 divisors must be a power of 2. */
for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
if ((sdhci->max_clk / div) <= clock)
break;
}
}
div >>= 1;
/*
* Tegra3 sdmmc controller internal clock will not be stabilized when
* we use a clock divider value greater than 4. The WAR is as follows.
* - Enable internal clock.
* - Wait for 5 usec and do a dummy write.
* - Poll for clk stable.
*/
set_clk:
clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/* Wait for 5 usec */
udelay(5);
/* Do a dummy write */
ctrl = sdhci_readb(sdhci, SDHCI_CAPABILITIES);
ctrl |= 1;
sdhci_writeb(sdhci, ctrl, SDHCI_CAPABILITIES);
/* Wait max 20 ms */
timeout = 20;
while (!((clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
dev_err(mmc_dev(sdhci->mmc), "Internal clock never stabilised\n");
return;
}
timeout--;
mdelay(1);
}
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
out:
sdhci->clock = clock;
}
#endif /* #ifdef CONFIG_ARCH_TEGRA_3x_SOC */
static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
u8 ctrl;
pr_debug("%s %s %u enabled=%u\n", __func__,
mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
if (clock) {
/* bring out sd instance from io dpd mode */
if (tegra_host->dpd) {
mutex_lock(&tegra_host->dpd->delay_lock);
cancel_delayed_work_sync(&tegra_host->dpd->delay_dpd);
tegra_io_dpd_disable(tegra_host->dpd);
mutex_unlock(&tegra_host->dpd->delay_lock);
}
if (!tegra_host->clk_enabled) {
pm_runtime_get_sync(&pdev->dev);
clk_prepare_enable(pltfm_host->clk);
ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
tegra_host->clk_enabled = true;
}
tegra_sdhci_set_clk_rate(sdhci, clock);
if (tegra_host->hw_ops->set_card_clock)
tegra_host->hw_ops->set_card_clock(sdhci, clock);
} else if (!clock && tegra_host->clk_enabled) {
if (tegra_host->hw_ops->set_card_clock)
tegra_host->hw_ops->set_card_clock(sdhci, clock);
ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
clk_disable_unprepare(pltfm_host->clk);
pm_runtime_put_sync(&pdev->dev);
tegra_host->clk_enabled = false;
/* io dpd enable call for sd instance */
if (tegra_host->dpd) {
mutex_lock(&tegra_host->dpd->delay_lock);
if (tegra_host->dpd->need_delay_dpd) {
schedule_delayed_work(
&tegra_host->dpd->delay_dpd,
msecs_to_jiffies(100));
} else {
tegra_io_dpd_enable(tegra_host->dpd);
}
mutex_unlock(&tegra_host->dpd->delay_lock);
}
}
}
static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci)
{
unsigned int val;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
unsigned int timeout = 10;
/* No Calibration for sdmmc4 */
if (tegra_host->instance == 3)
return;
if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
return;
val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
val |= 0x7;
sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
/* Enable Auto Calibration*/
val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
/* Program Auto cal PD offset(bits 8:14) */
val &= ~(0x7F <<
SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
val |= (SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET <<
SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
/* Program Auto cal PU offset(bits 0:6) */
val &= ~0x7F;
val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET;
}
sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
/* Wait until the calibration is done */
do {
if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
break;
mdelay(1);
timeout--;
} while (timeout);
if (!timeout)
dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
/* Disable Auto calibration */
val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
unsigned int pulldown_code;
unsigned int pullup_code;
int pg;
int err;
pg = tegra_drive_get_pingroup(mmc_dev(sdhci->mmc));
if (pg != -1) {
/* Get the pull down codes from auto cal status reg */
pulldown_code = (
sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
/* Set the pull down in the pinmux reg */
err = tegra_drive_pinmux_set_pull_down(pg,
pulldown_code);
if (err)
dev_err(mmc_dev(sdhci->mmc),
"Failed to set pulldown codes %d err %d\n",
pulldown_code, err);
/* Calculate the pull up codes */
pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
if (pullup_code >= TEGRA_MAX_PULL)
pullup_code = TEGRA_MAX_PULL - 1;
/* Set the pull up code in the pinmux reg */
err = tegra_drive_pinmux_set_pull_up(pg, pullup_code);
if (err)
dev_err(mmc_dev(sdhci->mmc),
"Failed to set pullup codes %d err %d\n",
pullup_code, err);
}
}
}
static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
unsigned int signal_voltage)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
unsigned int min_uV = tegra_host->vddio_min_uv;
unsigned int max_uV = tegra_host->vddio_max_uv;
unsigned int rc = 0;
u16 clk, ctrl;
ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
ctrl |= SDHCI_CTRL_VDD_180;
min_uV = SDHOST_LOW_VOLT_MIN;
max_uV = SDHOST_LOW_VOLT_MAX;
} else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
if (ctrl & SDHCI_CTRL_VDD_180)
ctrl &= ~SDHCI_CTRL_VDD_180;
}
/* Check if the slot can support the required voltage */
if (min_uV > tegra_host->vddio_max_uv)
return 0;
/* Switch OFF the card clock to prevent glitches on the clock line */
clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
clk &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/* Set/clear the 1.8V signalling */
sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
/* Switch the I/O rail voltage */
if (tegra_host->vdd_io_reg) {
rc = regulator_set_voltage(tegra_host->vdd_io_reg,
min_uV, max_uV);
if (rc) {
			dev_err(mmc_dev(sdhci->mmc), "switching to 1.8V "
				"failed. Switching back to 3.3V\n");
rc = regulator_set_voltage(tegra_host->vdd_io_reg,
SDHOST_HIGH_VOLT_MIN,
SDHOST_HIGH_VOLT_MAX);
if (rc)
dev_err(mmc_dev(sdhci->mmc),
"switching to 3.3V also failed\n");
}
}
/* Wait for 10 msec for the voltage to be switched */
mdelay(10);
/* Enable the card clock */
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
/* Wait for 1 msec after enabling clock */
mdelay(1);
return rc;
}
static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
{
unsigned long timeout;
sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
/* Wait max 100 ms */
timeout = 100;
/* hw clears the bit when it's done */
while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
if (timeout == 0) {
			dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never "
"completed.\n", (int)mask);
return;
}
timeout--;
mdelay(1);
}
tegra_sdhci_reset_exit(sdhci, mask);
}
static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
unsigned int tap_delay)
{
u32 vendor_ctrl;
/* Max tap delay value is 255 */
BUG_ON(tap_delay > MAX_TAP_VALUES);
vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
vendor_ctrl &= ~(0xFF << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
}
static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct sdhci_tegra_sd_stats *head;
head = tegra_host->sd_stat_head;
if (int_status & SDHCI_INT_DATA_CRC)
head->data_crc_count++;
if (int_status & SDHCI_INT_CRC)
head->cmd_crc_count++;
if (int_status & SDHCI_INT_TIMEOUT)
head->cmd_to_count++;
if (int_status & SDHCI_INT_DATA_TIMEOUT)
head->data_to_count++;
return 0;
}
static void sdhci_tegra_dump_tuning_data(struct sdhci_host *sdhci, u8 freq_band)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
if (tegra_host->tuning_data.tap_data[0]) {
dev_info(mmc_dev(sdhci->mmc), "Tuning window data at %d mV\n",
tuning_params[freq_band].voltages[0]);
pr_info("Partial window %d\n",
tegra_host->tuning_data.tap_data[0]->partial_win);
pr_info("full window start %d\n",
tegra_host->tuning_data.tap_data[0]->full_win_begin);
pr_info("full window end %d\n",
tegra_host->tuning_data.tap_data[0]->full_win_end);
}
if (((freq_band == TUNING_HIGH_FREQ) ||
(freq_band == TUNING_HIGH_FREQ_HV)) &&
(tegra_host->tuning_data.tap_data[1])) {
dev_info(mmc_dev(sdhci->mmc), "Tuning window data at %d mV\n",
tuning_params[freq_band].voltages[1]);
pr_info("partial window %d\n",
tegra_host->tuning_data.tap_data[1]->partial_win);
pr_info("full window being %d\n",
tegra_host->tuning_data.tap_data[1]->full_win_begin);
pr_info("full window end %d\n",
tegra_host->tuning_data.tap_data[1]->full_win_end);
}
pr_info("%s window chosen\n",
tegra_host->tuning_data.select_partial_win ?
"partial" : "full");
pr_info("Best tap value %d\n",
tegra_host->tuning_data.best_hv_tap_value);
pr_info("Best tap value %d\n",
tegra_host->tuning_data.best_tap_value);
}
/*
 * Calculation of best tap value for low frequencies (82 MHz).
* X = Partial win, Y = Full win start, Z = Full win end.
* UI = Z - X.
* Full Window = Z - Y.
* Taps margin = mid-point of 1/2*(curr_freq/max_frequency)*UI
* = (1/2)*(1/2)*(82/200)*UI
* = (0.1025)*UI
* if Partial win<(0.22)*UI
* best tap = Y+(0.1025*UI)
* else
* best tap = (X-(Z-Y))+(0.1025*UI)
* If best tap<0, best tap = 0
*/
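/*
 * Worked example with hypothetical window values (not measured data):
 * X = 10, Y = 40, Z = 120 at an 82 MHz tuning clock with a 200 MHz max
 * clock gives UI = 110 and a sampling point of (110 * 82 / 200) >> 2 = 11
 * taps. The partial window (10) is below 0.22 * UI (~24), so the full
 * window is used and best tap = Y + 11 = 51. Had X been 30 (so UI = 90 and
 * the sampling point 9 taps), the partial window would be chosen and
 * (30 - (120 - 40)) + 9 is negative, so the best tap would clamp to 0.
 */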
static void calculate_low_freq_tap_value(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
unsigned int curr_clock;
unsigned int max_clock;
int best_tap_value;
struct tap_window_data *tap_data;
struct tegra_tuning_data *tuning_data;
tuning_data = &tegra_host->tuning_data;
tap_data = tuning_data->tap_data[0];
if (tap_data->abandon_full_win) {
if (tap_data->abandon_partial_win) {
tuning_data->best_tap_value = 0;
return;
} else {
tuning_data->select_partial_win = true;
goto calculate_best_tap;
}
}
tap_data->tuning_ui = tap_data->full_win_end - tap_data->partial_win;
/* Calculate the sampling point */
curr_clock = sdhci->max_clk / 1000000;
max_clock = uhs_max_freq_MHz[sdhci->mmc->ios.timing];
tap_data->sampling_point = ((tap_data->tuning_ui * curr_clock) /
max_clock);
tap_data->sampling_point >>= 2;
/*
* Check whether partial window should be used. Use partial window
* if partial window > 0.22(UI).
*/
if ((!tap_data->abandon_partial_win) &&
(tap_data->partial_win > ((22 * tap_data->tuning_ui) / 100)))
tuning_data->select_partial_win = true;
calculate_best_tap:
if (tuning_data->select_partial_win) {
best_tap_value = (tap_data->partial_win -
(tap_data->full_win_end - tap_data->full_win_begin)) +
tap_data->sampling_point;
tuning_data->best_tap_value = (best_tap_value < 0) ? 0 :
best_tap_value;
} else {
tuning_data->best_tap_value = tap_data->full_win_begin +
tap_data->sampling_point;
}
}
/*
 * Calculation of best tap value for high frequencies (156 MHz).
* Tap window data at 1.25V core voltage
* X = Partial win, Y = Full win start, Z = Full win end.
* Full Window = Z-Y.
* UI = Z-X.
* Tap_margin = (0.20375)UI
*
* Tap window data at 1.1V core voltage
* X' = Partial win, Y' = Full win start, Z' = Full win end.
* UI' = Z'-X'.
* Full Window' = Z'-Y'.
* Tap_margin' = (0.20375)UI'
*
* Full_window_tap=[(Z'-0.20375UI')+(Y+0.20375UI)]/2
 * Partial_window_tap=[(X'-0.20375UI')+(X-(Z-Y)+0.20375UI)]/2
* if(Partial_window_tap < 0), Partial_window_tap=0
*
* Full_window_quality=[(Z'-0.20375UI')-(Y+0.20375UI)]/2
* Partial_window_quality=(X'-0.20375UI')-Partial_window_tap
* if(Full_window_quality>Partial_window_quality) choose full window,
* else choose partial window.
* If there is no margin window for both cases,
* best tap=(Y+Z')/2.
*/
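/*
 * Worked example with hypothetical window values (not measured data),
 * assuming a 156 MHz tuning clock and a 200 MHz max clock: at the higher
 * core voltage X = 20, Y = 60, Z = 200 gives UI = 180 and a sampling point
 * of (180 * 156 / 200) >> 2 = 35 taps; at the lower voltage X' = 30,
 * Z' = 190 gives UI' = 160 and a sampling point of 31 taps. Then
 * full_win_tap = ((190 - 31) + (60 + 35)) / 2 = 127 with quality 32, while
 * the partial window tap clamps to 0 with negative quality, so the full
 * window is selected and the best tap value is 127.
 */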
static unsigned int calculate_high_freq_tap_value(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
unsigned int curr_clock;
unsigned int max_clock;
unsigned int best_tap;
struct tap_window_data *vmax_tap_data;
struct tap_window_data *vmid_tap_data;
struct tegra_tuning_data *tuning_data;
unsigned int full_win_tap;
int partial_win_start;
int partial_win_tap;
int full_win_quality;
int partial_win_quality;
tuning_data = &tegra_host->tuning_data;
vmax_tap_data = tuning_data->tap_data[0];
vmid_tap_data = tuning_data->tap_data[1];
/*
* If tuning at min override voltage is not done or one shot tuning is
	 * done, set the best tap value as 50% of the full window.
*/
if (!tuning_data->overide_vcore_tuning_done ||
tuning_data->one_shot_tuning) {
dev_info(mmc_dev(sdhci->mmc),
"Setting best tap as 50 percent of the full window\n");
best_tap = (vmax_tap_data->full_win_begin +
(((vmax_tap_data->full_win_end -
vmax_tap_data->full_win_begin) * 5) / 10));
return best_tap;
}
curr_clock = sdhci->max_clk / 1000000;
max_clock = uhs_max_freq_MHz[sdhci->mmc->ios.timing];
/*
* Calculate the tuning_ui and sampling points for tap windows found
* at all core voltages.
*/
vmax_tap_data->tuning_ui = vmax_tap_data->full_win_end -
vmax_tap_data->partial_win;
vmax_tap_data->sampling_point =
(vmax_tap_data->tuning_ui * curr_clock) / max_clock;
vmax_tap_data->sampling_point >>= 2;
vmid_tap_data->tuning_ui = vmid_tap_data->full_win_end -
vmid_tap_data->partial_win;
vmid_tap_data->sampling_point =
(vmid_tap_data->tuning_ui * curr_clock) / max_clock;
vmid_tap_data->sampling_point >>= 2;
full_win_tap = ((vmid_tap_data->full_win_end -
vmid_tap_data->sampling_point) +
(vmax_tap_data->full_win_begin +
vmax_tap_data->sampling_point));
full_win_tap >>= 1;
full_win_quality = (vmid_tap_data->full_win_end -
vmid_tap_data->sampling_point) -
(vmax_tap_data->full_win_begin +
vmax_tap_data->sampling_point);
full_win_quality >>= 1;
partial_win_start = (vmax_tap_data->partial_win -
(vmax_tap_data->full_win_end -
vmax_tap_data->full_win_begin));
partial_win_tap = ((vmid_tap_data->partial_win -
vmid_tap_data->sampling_point) +
(partial_win_start + vmax_tap_data->sampling_point));
partial_win_tap >>= 1;
if (partial_win_tap < 0)
partial_win_tap = 0;
partial_win_quality = (vmid_tap_data->partial_win -
vmid_tap_data->sampling_point) - partial_win_tap;
if ((full_win_quality <= 0) && (partial_win_quality)) {
dev_warn(mmc_dev(sdhci->mmc),
"No margin window for both windows\n");
best_tap = vmax_tap_data->full_win_begin +
vmid_tap_data->full_win_end;
best_tap >>= 1;
} else {
if (full_win_quality > partial_win_quality) {
best_tap = full_win_tap;
tuning_data->select_partial_win = false;
} else {
best_tap = partial_win_tap;
tuning_data->select_partial_win = true;
}
}
return best_tap;
}
static int sdhci_tegra_run_frequency_tuning(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
int err = 0;
u8 ctrl;
u32 mask;
unsigned int timeout = 10;
int flags;
u32 intstatus;
if (gpio_is_valid(tegra_host->plat->cd_gpio) &&
(gpio_get_value(tegra_host->plat->cd_gpio) != 0))
return -ENOMEDIUM;
mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
if (timeout == 0) {
			dev_err(mmc_dev(sdhci->mmc), "Controller never "
"released inhibit bit(s).\n");
err = -ETIMEDOUT;
goto out;
}
timeout--;
mdelay(1);
}
ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
ctrl |= SDHCI_CTRL_EXEC_TUNING;
sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
/*
* In response to CMD19, the card sends 64 bytes of tuning
* block to the Host Controller. So we set the block size
* to 64 here.
* In response to CMD21, the card sends 128 bytes of tuning
* block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
	 * to the Host Controller. So we set the block size accordingly here.
*/
sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
SDHCI_BLOCK_SIZE);
sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
sdhci_writeb(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
/* Set the cmd flags */
flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
/* Issue the command */
sdhci_writew(sdhci, SDHCI_MAKE_CMD(
tegra_host->tuning_opcode, flags), SDHCI_COMMAND);
timeout = 5;
do {
timeout--;
mdelay(1);
intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
if (intstatus) {
sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
break;
}
} while(timeout);
if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
!(intstatus & SDHCI_INT_DATA_CRC)) {
err = 0;
sdhci->tuning_done = 1;
} else {
tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
err = -EIO;
}
if (sdhci->tuning_done) {
sdhci->tuning_done = 0;
ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
(ctrl & SDHCI_CTRL_TUNED_CLK))
err = 0;
else
err = -EIO;
}
mdelay(1);
out:
return err;
}
static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
unsigned int starting_tap, bool expect_failure)
{
unsigned int tap_value = starting_tap;
int err;
unsigned int retry = TUNING_RETRIES;
do {
/* Set the tap delay */
sdhci_tegra_set_tap_delay(sdhci, tap_value);
/* Run frequency tuning */
err = sdhci_tegra_run_frequency_tuning(sdhci);
if (err == -ENOMEDIUM)
return err;
if (err && retry) {
retry--;
continue;
} else {
retry = TUNING_RETRIES;
if ((expect_failure && !err) ||
(!expect_failure && err))
break;
}
tap_value++;
} while (tap_value <= MAX_TAP_VALUES);
return tap_value;
}
/*
* While scanning for tap values, first get the partial window followed by the
* full window. Note that, when scanning for full win start, tuning has to be
* run until a passing tap value is found. Hence, failure is expected during
* this process and ignored.
*/
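/*
 * Hypothetical scan, purely for illustration: if taps 0-14 pass, taps
 * 15-44 fail and taps 45-210 pass again, the first scan stops at the
 * first failure (tap 15) and records partial_win = 14, the second scan
 * (where failures are expected) stops at the first pass and records
 * full_win_begin = 45, and the third scan stops at the next failure,
 * giving full_win_end = 210.
 */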
static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
struct tap_window_data *tap_data)
{
unsigned int tap_value;
unsigned int full_win_percentage = 0;
int err = 0;
if (!tap_data) {
dev_err(mmc_dev(sdhci->mmc), "Invalid tap data\n");
return -ENODATA;
}
/* Get the partial window data */
tap_value = 0;
tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, false);
if (tap_value < 0) {
err = tap_value;
goto out;
} else if (!tap_value) {
tap_data->abandon_partial_win = true;
tap_data->partial_win = 0;
} else if (tap_value > MAX_TAP_VALUES) {
/*
* If tap value is more than 0xFF, we have hit the miracle case
* of all tap values passing. Discard full window as passing
* window has covered all taps.
*/
tap_data->partial_win = MAX_TAP_VALUES;
tap_data->abandon_full_win = true;
goto out;
} else {
tap_data->partial_win = tap_value - 1;
if (tap_value == MAX_TAP_VALUES) {
/* All tap values exhausted. No full window */
tap_data->abandon_full_win = true;
goto out;
}
}
do {
/* Get the full window start */
tap_value++;
tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true);
if (tap_value < 0) {
err = tap_value;
goto out;
} else if (tap_value > MAX_TAP_VALUES) {
/* All tap values exhausted. No full window */
tap_data->abandon_full_win = true;
goto out;
} else {
tap_data->full_win_begin = tap_value;
/*
* If full win start is 0xFF, then set that as
* full win end and exit.
*/
if (tap_value == MAX_TAP_VALUES) {
tap_data->full_win_end = tap_value;
goto out;
}
}
/* Get the full window end */
tap_value++;
tap_value = sdhci_tegra_scan_tap_values(sdhci,
tap_value, false);
if (tap_value < 0) {
err = tap_value;
goto out;
}
tap_data->full_win_end = tap_value - 1;
if (tap_value > MAX_TAP_VALUES)
tap_data->full_win_end = MAX_TAP_VALUES;
full_win_percentage = ((tap_data->full_win_end -
tap_data->full_win_begin) * 100) /
(tap_data->partial_win + 1);
} while (full_win_percentage < 50 && tap_value < MAX_TAP_VALUES);
if (full_win_percentage < 50)
tap_data->abandon_full_win = true;
out:
/*
* Mark tuning as failed if both partial and full windows are
* abandoned.
*/
if (tap_data->abandon_partial_win && tap_data->abandon_full_win)
err = -EIO;
return err;
}
static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct tegra_tuning_data *tuning_data;
struct tap_window_data *tap_data;
int err;
u16 ctrl_2;
u32 ier;
unsigned int freq_band;
unsigned int i = 0;
unsigned int j = 0;
unsigned int voltage = 0;
bool vcore_override_failed = false;
static unsigned int vcore_lvl;
/* Tuning is valid only in SDR104 and SDR50 modes */
ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
(sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
return 0;
/* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
else
return -EINVAL;
sdhci->flags &= ~SDHCI_NEEDS_RETUNING;
/* Set the tuning command to be used */
tegra_host->tuning_opcode = opcode;
/*
	 * Disable all interrupt signalling. Enable interrupt status
* detection for buffer read ready and data crc. We use
* polling for tuning as it involves less overhead.
*/
ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
if (sdhci->max_clk > tuning_params[TUNING_LOW_FREQ].freq_hz)
freq_band = TUNING_HIGH_FREQ;
else
freq_band = TUNING_LOW_FREQ;
tuning_data = &tegra_host->tuning_data;
/*
* If tuning is already done and retune request is not set, then skip
* best tap value calculation and use the old best tap value.
*/
if ((tegra_host->tuning_status == TUNING_STATUS_DONE) &&
(tegra_host->instance != 0)) {
dev_info(mmc_dev(sdhci->mmc),
"Tuning already done. Setting tuned tap value %d\n",
tegra_host->tuning_data.best_tap_value);
goto set_best_tap;
}
/* Remove any previously set override voltages */
if (tegra_host->set_tuning_override) {
spin_unlock(&sdhci->lock);
tegra_dvfs_override_core_voltage(0);
spin_lock(&sdhci->lock);
vcore_lvl = 0;
tegra_host->set_tuning_override = false;
}
/*
* Run tuning and get the passing tap window info for all frequencies
* and core voltages required to calculate the final tap value. The
* standard driver calls this platform specific tuning callback after
* holding a lock. The spinlock needs to be released when calling
* non-atomic context functions like regulator calls etc.
*/
for (j = 2 ; j >= 1; j--) {
if ((tegra_host->instance == 0) && (freq_band != TUNING_LOW_FREQ))
freq_band = j;
else
j = j-1;
for (i = 0; i < tuning_params[freq_band].nr_voltages; i++) {
spin_unlock(&sdhci->lock);
if (!tuning_data->tap_data[i]) {
tuning_data->tap_data[i] = devm_kzalloc(
mmc_dev(sdhci->mmc),
sizeof(struct tap_window_data), GFP_KERNEL);
if (!tuning_data->tap_data[i]) {
err = -ENOMEM;
dev_err(mmc_dev(sdhci->mmc),
"Insufficient memory for tap window info\n");
spin_lock(&sdhci->lock);
goto out;
}
}
tap_data = tuning_data->tap_data[i];
/*
* If nominal vcore is not specified, run tuning once and set
* the tap value. Tuning might fail but this is a better option
* than not trying tuning at all.
*/
if (!tegra_host->nominal_vcore_mv) {
dev_err(mmc_dev(sdhci->mmc),
"Missing nominal vcore. Tuning might fail\n");
tuning_data->one_shot_tuning = true;
goto skip_vcore_override;
}
voltage = tuning_params[freq_band].voltages[i];
if (voltage > tegra_host->nominal_vcore_mv) {
voltage = tegra_host->nominal_vcore_mv;
if ((tuning_data->nominal_vcore_tuning_done) &&
(tuning_params[freq_band].nr_voltages == 1)) {
spin_lock(&sdhci->lock);
continue;
}
} else if (voltage < tegra_host->min_vcore_override_mv) {
voltage = tegra_host->min_vcore_override_mv;
if ((tuning_data->overide_vcore_tuning_done) &&
(tuning_params[freq_band].nr_voltages == 1)) {
spin_lock(&sdhci->lock);
continue;
}
}
if (voltage != vcore_lvl) {
/* Boost emc clock to 900MHz before setting 1.39V */
if ((voltage == 1390) && tegra_host->emc_clk) {
err = clk_prepare_enable(tegra_host->emc_clk);
if (err)
dev_err(mmc_dev(sdhci->mmc),
"1.39V emc freq boost failed %d\n",
err);
else
tegra_host->emc_clk_enabled = true;
}
err = tegra_dvfs_override_core_voltage(voltage);
if (err) {
vcore_override_failed = true;
dev_err(mmc_dev(sdhci->mmc),
"Setting tuning override_mv %d failed %d\n",
voltage, err);
} else {
vcore_lvl = voltage;
}
}
spin_lock(&sdhci->lock);
skip_vcore_override:
/* Get the tuning window info */
err = sdhci_tegra_get_tap_window_data(sdhci, tap_data);
if (err) {
dev_err(mmc_dev(sdhci->mmc), "No tuning window\n");
goto out;
}
if (tuning_data->one_shot_tuning) {
spin_lock(&sdhci->lock);
tuning_data->nominal_vcore_tuning_done = true;
tuning_data->overide_vcore_tuning_done = true;
break;
}
if (!vcore_override_failed) {
if (voltage == tegra_host->nominal_vcore_mv)
tuning_data->nominal_vcore_tuning_done = true;
if (voltage >= tegra_host->min_vcore_override_mv)
tuning_data->overide_vcore_tuning_done = true;
}
/* Release the override voltage setting */
spin_unlock(&sdhci->lock);
err = tegra_dvfs_override_core_voltage(0);
if (err)
dev_err(mmc_dev(sdhci->mmc),
"Clearing tuning override voltage failed %d\n",
err);
else
vcore_lvl = 0;
if (tegra_host->emc_clk_enabled) {
clk_disable_unprepare(tegra_host->emc_clk);
tegra_host->emc_clk_enabled = false;
}
spin_lock(&sdhci->lock);
}
/* If setting min override voltage failed for the first time, set
* nominal core voltage as override until retuning is done.
*/
if ((tegra_host->tuning_status != TUNING_STATUS_RETUNE) &&
tuning_data->nominal_vcore_tuning_done &&
!tuning_data->overide_vcore_tuning_done)
tegra_host->set_tuning_override = true;
/* Calculate best tap for current freq band */
if (freq_band == TUNING_LOW_FREQ) {
calculate_low_freq_tap_value(sdhci);
} else {
if (freq_band == TUNING_HIGH_FREQ) {
tegra_host->tuning_data.best_tap_value =
calculate_high_freq_tap_value(sdhci);
goto set_best_tap;
} else {
tegra_host->tuning_data.best_hv_tap_value =
calculate_high_freq_tap_value(sdhci);
goto set_best_tap;
}
}
set_best_tap:
/* Dump the tap window data */
sdhci_tegra_dump_tuning_data(sdhci, freq_band);
}
sdhci_tegra_set_tap_delay(sdhci,
tegra_host->tuning_data.best_tap_value);
/*
* Run tuning with the best tap value. If tuning fails, set the status
* for retuning next time enumeration is done.
*/
err = sdhci_tegra_run_frequency_tuning(sdhci);
if (err) {
tuning_data->nominal_vcore_tuning_done = false;
tuning_data->overide_vcore_tuning_done = false;
tegra_host->tuning_status = TUNING_STATUS_RETUNE;
} else {
if (tuning_data->nominal_vcore_tuning_done &&
tuning_data->overide_vcore_tuning_done)
tegra_host->tuning_status = TUNING_STATUS_DONE;
else
tegra_host->tuning_status = TUNING_STATUS_RETUNE;
}
out:
/*
* Lock down the core voltage if tuning at override voltage failed
* for the first time. The override setting will be removed once
* retuning is called.
*/
if ((tegra_host->set_tuning_override) &&
(err != -ENOMEDIUM)) {
dev_info(mmc_dev(sdhci->mmc),
"Nominal core voltage being set until retuning\n");
spin_unlock(&sdhci->lock);
err = tegra_dvfs_override_core_voltage(
tegra_host->nominal_vcore_mv);
if (err)
dev_err(mmc_dev(sdhci->mmc),
"Setting tuning override voltage failed %d\n",
err);
else
vcore_lvl = tegra_host->nominal_vcore_mv;
spin_lock(&sdhci->lock);
/* Schedule for the retuning */
mod_timer(&sdhci->tuning_timer, jiffies +
10 * HZ);
}
/* Enable interrupts. Enable full range for core voltage */
sdhci_writel(sdhci, ier, SDHCI_INT_ENABLE);
sdhci_writel(sdhci, ier, SDHCI_SIGNAL_ENABLE);
return err;
}
static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
tegra_sdhci_set_clock(sdhci, 0);
/* Disable the power rails if any */
if (tegra_host->card_present) {
if (tegra_host->is_rail_enabled) {
if (tegra_host->vdd_io_reg)
regulator_disable(tegra_host->vdd_io_reg);
if (tegra_host->vdd_slot_reg)
regulator_disable(tegra_host->vdd_slot_reg);
tegra_host->is_rail_enabled = 0;
}
}
if (tegra_host->dpd) {
mutex_lock(&tegra_host->dpd->delay_lock);
tegra_host->dpd->need_delay_dpd = 1;
mutex_unlock(&tegra_host->dpd->delay_lock);
}
return 0;
}
static int tegra_sdhci_resume(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
struct platform_device *pdev;
struct tegra_sdhci_platform_data *plat;
pdev = to_platform_device(mmc_dev(sdhci->mmc));
plat = pdev->dev.platform_data;
if (gpio_is_valid(plat->cd_gpio))
tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0);
/* Enable the power rails if any */
if (tegra_host->card_present) {
if (!tegra_host->is_rail_enabled) {
if (tegra_host->vdd_slot_reg)
regulator_enable(tegra_host->vdd_slot_reg);
if (tegra_host->vdd_io_reg) {
regulator_enable(tegra_host->vdd_io_reg);
if (plat->mmc_data.ocr_mask &
SDHOST_1V8_OCR_MASK)
tegra_sdhci_signal_voltage_switch(sdhci,
MMC_SIGNAL_VOLTAGE_180);
else
tegra_sdhci_signal_voltage_switch(sdhci,
MMC_SIGNAL_VOLTAGE_330);
}
tegra_host->is_rail_enabled = 1;
}
}
	/* Set the minimum identification clock frequency of 400 kHz */
tegra_sdhci_set_clock(sdhci, 400000);
/* Reset the controller and power on if the MMC_PM_KEEP_POWER flag is set */
if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
sdhci->pwr = 0;
}
return 0;
}
static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
if (tegra_host->card_present) {
if (tegra_host->sd_detect_in_suspend)
tasklet_schedule(&sdhci->card_tasklet);
} else if (tegra_host->clk_enabled) {
/* Turn OFF the clocks if the card is not present */
tegra_sdhci_set_clock(sdhci, 0);
}
}
static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
{
struct dentry *root;
root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
if (IS_ERR(root))
/* Don't complain -- debugfs just isn't enabled */
return;
if (!root)
/* Complain -- debugfs is enabled, but it failed to
* create the directory. */
goto err_root;
host->debugfs_root = root;
if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
&sdhci_host_fops))
goto err_node;
return;
err_node:
debugfs_remove_recursive(root);
host->debugfs_root = NULL;
err_root:
pr_err("%s: Failed to initialize debugfs functionality\n", __func__);
return;
}
static struct sdhci_ops tegra_sdhci_ops = {
#ifndef CONFIG_ARCH_TEGRA_11x_SOC
.get_ro = tegra_sdhci_get_ro,
#endif
.get_cd = tegra_sdhci_get_cd,
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
.set_clock = tegra_sdhci_set_clock,
.suspend = tegra_sdhci_suspend,
.resume = tegra_sdhci_resume,
.platform_resume = tegra_sdhci_post_resume,
.platform_reset_exit = tegra_sdhci_reset_exit,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
.switch_signal_voltage_exit = tegra_sdhci_do_calibration,
.execute_freq_tuning = sdhci_tegra_execute_tuning,
.sd_error_stats = sdhci_tegra_sd_error_stats,
};
static struct sdhci_pltfm_data sdhci_tegra20_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING |
SDHCI_QUIRK_NON_STANDARD_TUNING |
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
SDHCI_QUIRK_NONSTANDARD_CLOCK |
#endif
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO,
.quirks2 = SDHCI_QUIRK2_BROKEN_PRESET_VALUES,
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra20 = {
.pdata = &sdhci_tegra20_pdata,
.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
NVQUIRK_ENABLE_PADPIPE_CLKEN |
NVQUIRK_DISABLE_SPI_MODE_CLKEN |
NVQUIRK_EN_FEEDBACK_CLK |
NVQUIRK_SET_TAP_DELAY |
NVQUIRK_ENABLE_SDR50_TUNING |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
#endif
#if defined(CONFIG_ARCH_TEGRA_11x_SOC)
NVQUIRK_SET_DRIVE_STRENGTH |
#endif
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
NVQUIRK_DISABLE_AUTO_CALIBRATION |
#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
NVQUIRK_SET_CALIBRATION_OFFSETS |
NVQUIRK_ENABLE_SD_3_0 |
#else
NVQUIRK_SET_TRIM_DELAY |
NVQUIRK_ENABLE_DDR50 |
NVQUIRK_INFINITE_ERASE_TIMEOUT |
NVQUIRK_DISABLE_AUTO_CMD23 |
#endif
NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
};
#endif
static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = {
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
#endif
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
#endif
{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
struct platform_device *pdev)
{
struct tegra_sdhci_platform_data *plat;
struct device_node *np = pdev->dev.of_node;
if (!np)
return NULL;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat) {
dev_err(&pdev->dev, "Can't allocate platform data\n");
return NULL;
}
plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
if (of_find_property(np, "support-8bit", NULL))
plat->is_8bit = 1;
return plat;
}
static void tegra_sdhci_rail_off(struct sdhci_tegra *tegra_host)
{
if (tegra_host->is_rail_enabled) {
if (tegra_host->vdd_slot_reg)
regulator_disable(tegra_host->vdd_slot_reg);
if (tegra_host->vdd_io_reg)
regulator_disable(tegra_host->vdd_io_reg);
tegra_host->is_rail_enabled = false;
}
}
static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
unsigned long event, void *data)
{
struct sdhci_tegra *tegra_host =
container_of(nb, struct sdhci_tegra, reboot_notify);
switch (event) {
case SYS_RESTART:
case SYS_POWER_OFF:
tegra_sdhci_rail_off(tegra_host);
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
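/*
* sysfs store handler for the "handle_turbo_mode" attribute: writing 1
* applies the tap value tuned for the high core-voltage (turbo) range,
* writing 0 restores the tap value tuned for the normal voltage range.
* The handler waits for any in-flight read/write to finish and always
* operates on the global sdhci_host_for_sdio instance.
*/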
static ssize_t sdhci_handle_boost_mode_tap(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int tap_enable;
u32 present_state;
char *p = (char *)buf;
/* struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
* struct sdhci_host *host = mmc_priv(mmc);
* struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
* struct sdhci_tegra *tegra_host = pltfm_host->priv;
*/
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci_host_for_sdio);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
tap_enable = memparse(p, &p);
present_state = sdhci_readl(sdhci_host_for_sdio, SDHCI_PRESENT_STATE);
while ((present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
present_state = sdhci_readl(sdhci_host_for_sdio,
SDHCI_PRESENT_STATE);
}
if (tap_enable == 1) {
/* Set the tap value calculated for the 1.25 V to 1.39 V core voltage range */
tegra_host->tuning_turbo_enable = 1;
dev_err(mmc_dev(sdhci_host_for_sdio->mmc),
"Setting tap value for Voltage turbo mode\n");
disable_irq(sdhci_host_for_sdio->irq);
spin_lock(&sdhci_host_for_sdio->lock);
sdhci_tegra_set_tap_delay(sdhci_host_for_sdio,
tegra_host->tuning_data.best_hv_tap_value);
spin_unlock(&sdhci_host_for_sdio->lock);
enable_irq(sdhci_host_for_sdio->irq);
} else if (tap_enable == 0) {
/* Set the tap value calculated for the 1.1 V to 1.25 V core voltage range */
tegra_host->tuning_turbo_enable = 0;
dev_err(mmc_dev(sdhci_host_for_sdio->mmc),
"Setting tap value for voltage non-turbo mode\n");
disable_irq(sdhci_host_for_sdio->irq);
spin_lock(&sdhci_host_for_sdio->lock);
sdhci_tegra_set_tap_delay(sdhci_host_for_sdio,
tegra_host->tuning_data.best_tap_value);
spin_unlock(&sdhci_host_for_sdio->lock);
enable_irq(sdhci_host_for_sdio->irq);
} else {
dev_err(mmc_dev(sdhci_host_for_sdio->mmc),
"Wrong value 1: enable turbo mode 0: disable turbo mode\n");
}
return count;
}
static ssize_t sdhci_show_turbo_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci_host_for_sdio);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
return sprintf(buf, "%d\n", tegra_host->tuning_turbo_enable);
}
static DEVICE_ATTR(handle_turbo_mode, 0644, sdhci_show_turbo_mode,
sdhci_handle_boost_mode_tap);
static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct tegra_sdhci_platform_data *plat;
struct sdhci_tegra *tegra_host;
int rc;
match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
if (match)
soc_data = match->data;
else
soc_data = &soc_data_tegra20;
host = sdhci_pltfm_init(pdev, soc_data->pdata);
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
plat = pdev->dev.platform_data;
if (plat == NULL)
plat = sdhci_tegra_dt_parse_pdata(pdev);
if (plat == NULL) {
dev_err(mmc_dev(host->mmc), "missing platform data\n");
rc = -ENXIO;
goto err_no_plat;
}
tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
if (!tegra_host) {
dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
rc = -ENOMEM;
goto err_no_plat;
}
tegra_host->plat = plat;
tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev, sizeof(
struct sdhci_tegra_sd_stats),
GFP_KERNEL);
if (tegra_host->sd_stat_head == NULL) {
rc = -ENOMEM;
goto err_no_plat;
}
tegra_host->soc_data = soc_data;
pltfm_host->priv = tegra_host;
pll_c = clk_get_sys(NULL, "pll_c");
if (IS_ERR(pll_c)) {
rc = PTR_ERR(pll_c);
dev_err(mmc_dev(host->mmc),
"clk error in getting pll_c: %d\n", rc);
}
pll_p = clk_get_sys(NULL, "pll_p");
if (IS_ERR(pll_p)) {
rc = PTR_ERR(pll_p);
dev_err(mmc_dev(host->mmc),
"clk error in getting pll_p: %d\n", rc);
}
pll_c_rate = clk_get_rate(pll_c);
pll_p_rate = clk_get_rate(pll_p);
#ifdef CONFIG_MMC_EMBEDDED_SDIO
if (plat->mmc_data.embedded_sdio)
mmc_set_embedded_sdio_data(host->mmc,
&plat->mmc_data.embedded_sdio->cis,
&plat->mmc_data.embedded_sdio->cccr,
plat->mmc_data.embedded_sdio->funcs,
plat->mmc_data.embedded_sdio->num_funcs);
#endif
if (gpio_is_valid(plat->power_gpio)) {
rc = gpio_request(plat->power_gpio, "sdhci_power");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
goto err_power_req;
}
gpio_direction_output(plat->power_gpio, 1);
}
if (gpio_is_valid(plat->cd_gpio)) {
rc = gpio_request(plat->cd_gpio, "sdhci_cd");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
goto err_cd_req;
}
gpio_direction_input(plat->cd_gpio);
tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0);
rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
carddetect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (rc) {
dev_err(mmc_dev(host->mmc), "request irq error\n");
goto err_cd_irq_req;
}
rc = enable_irq_wake(gpio_to_irq(plat->cd_gpio));
if (rc < 0)
dev_err(mmc_dev(host->mmc),
"SD card wake-up event registration"
"failed with eroor: %d\n", rc);
} else if (plat->mmc_data.register_status_notify) {
plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
}
if (plat->mmc_data.status) {
plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
}
if (gpio_is_valid(plat->wp_gpio)) {
rc = gpio_request(plat->wp_gpio, "sdhci_wp");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
goto err_wp_req;
}
gpio_direction_input(plat->wp_gpio);
}
/*
* If there is no card detect gpio, assume that the
* card is always present.
*/
if (!gpio_is_valid(plat->cd_gpio))
tegra_host->card_present = 1;
if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
} else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
} else {
/*
* Set the minV and maxV to default
* voltage range of 2.7V - 3.6V
*/
tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
}
tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
"vddio_sdmmc");
if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
"Assuming vddio_sdmmc is not required.\n",
"vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
tegra_host->vdd_io_reg = NULL;
} else {
rc = regulator_set_voltage(tegra_host->vdd_io_reg,
tegra_host->vddio_min_uv,
tegra_host->vddio_max_uv);
if (rc) {
dev_err(mmc_dev(host->mmc), "%s regulator_set_voltage failed: %d",
"vddio_sdmmc", rc);
regulator_put(tegra_host->vdd_io_reg);
tegra_host->vdd_io_reg = NULL;
}
}
tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
"vddio_sd_slot");
if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
" Assuming vddio_sd_slot is not required.\n",
"vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
tegra_host->vdd_slot_reg = NULL;
}
if (tegra_host->card_present) {
if (tegra_host->vdd_slot_reg)
regulator_enable(tegra_host->vdd_slot_reg);
if (tegra_host->vdd_io_reg)
regulator_enable(tegra_host->vdd_io_reg);
tegra_host->is_rail_enabled = 1;
}
pm_runtime_enable(&pdev->dev);
pltfm_host->clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(pltfm_host->clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(pltfm_host->clk);
goto err_clk_get;
}
if (clk_get_parent(pltfm_host->clk) == pll_c)
tegra_host->is_parent_pllc = true;
pm_runtime_get_sync(&pdev->dev);
rc = clk_prepare_enable(pltfm_host->clk);
if (rc != 0)
goto err_clk_put;
if (!strcmp(dev_name(mmc_dev(host->mmc)), "sdhci-tegra.0")) {
tegra_host->emc_clk = clk_get(mmc_dev(host->mmc), "emc");
if (IS_ERR(tegra_host->emc_clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(tegra_host->emc_clk);
goto err_clk_put;
} else
clk_set_rate(tegra_host->emc_clk, 900000000);
}
pltfm_host->priv = tegra_host;
tegra_host->clk_enabled = true;
tegra_host->max_clk_limit = plat->max_clk_limit;
tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
tegra_host->sd_detect_in_suspend = plat->sd_detect_in_suspend;
tegra_host->instance = pdev->id;
tegra_host->dpd = tegra_io_dpd_get(mmc_dev(host->mmc));
host->mmc->pm_caps |= plat->pm_caps;
host->mmc->pm_flags |= plat->pm_flags;
host->mmc->caps |= MMC_CAP_ERASE;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
if (plat->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
host->mmc->caps |= MMC_CAP_SDIO_IRQ;
host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
if (plat->mmc_data.built_in) {
host->mmc->caps |= MMC_CAP_NONREMOVABLE;
}
host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
#ifdef CONFIG_MMC_BKOPS
host->mmc->caps2 |= MMC_CAP2_BKOPS;
#endif
tegra_sdhost_min_freq = TEGRA_SDHOST_MIN_FREQ;
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
tegra_host->hw_ops = &tegra_2x_sdhci_ops;
tegra_sdhost_std_freq = TEGRA2_SDHOST_STD_FREQ;
#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
tegra_host->hw_ops = &tegra_3x_sdhci_ops;
tegra_sdhost_std_freq = TEGRA3_SDHOST_STD_FREQ;
#else
tegra_host->hw_ops = &tegra_11x_sdhci_ops;
tegra_sdhost_std_freq = TEGRA3_SDHOST_STD_FREQ;
host->mmc->caps2 |= MMC_CAP2_HS200;
host->mmc->caps |= MMC_CAP_CMD23;
#endif
if (plat->nominal_vcore_mv)
tegra_host->nominal_vcore_mv = plat->nominal_vcore_mv;
if (plat->min_vcore_override_mv)
tegra_host->min_vcore_override_mv = plat->min_vcore_override_mv;
pr_err("nominal vcore %d, override %d\n", tegra_host->nominal_vcore_mv,
tegra_host->min_vcore_override_mv);
rc = sdhci_add_host(host);
if (tegra_host->instance == 0) {
device_create_file(&pdev->dev, &dev_attr_handle_turbo_mode);
sdhci_host_for_sdio = host;
}
sdhci_tegra_error_stats_debugfs(host);
if (rc)
goto err_add_host;
/* Enable async suspend/resume to reduce LP0 latency */
device_enable_async_suspend(&pdev->dev);
if (plat->power_off_rail) {
tegra_host->reboot_notify.notifier_call =
tegra_sdhci_reboot_notify;
register_reboot_notifier(&tegra_host->reboot_notify);
}
return 0;
err_add_host:
clk_put(tegra_host->emc_clk);
clk_disable_unprepare(pltfm_host->clk);
pm_runtime_put_sync(&pdev->dev);
err_clk_put:
clk_put(pltfm_host->clk);
err_clk_get:
if (gpio_is_valid(plat->wp_gpio))
gpio_free(plat->wp_gpio);
err_wp_req:
if (gpio_is_valid(plat->cd_gpio))
free_irq(gpio_to_irq(plat->cd_gpio), host);
err_cd_irq_req:
if (gpio_is_valid(plat->cd_gpio))
gpio_free(plat->cd_gpio);
err_cd_req:
if (gpio_is_valid(plat->power_gpio))
gpio_free(plat->power_gpio);
err_power_req:
err_no_plat:
sdhci_pltfm_free(pdev);
return rc;
}
static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
disable_irq_wake(gpio_to_irq(plat->cd_gpio));
if (tegra_host->vdd_slot_reg) {
regulator_disable(tegra_host->vdd_slot_reg);
regulator_put(tegra_host->vdd_slot_reg);
}
if (tegra_host->vdd_io_reg) {
regulator_disable(tegra_host->vdd_io_reg);
regulator_put(tegra_host->vdd_io_reg);
}
if (gpio_is_valid(plat->wp_gpio))
gpio_free(plat->wp_gpio);
if (gpio_is_valid(plat->cd_gpio)) {
free_irq(gpio_to_irq(plat->cd_gpio), host);
gpio_free(plat->cd_gpio);
}
if (gpio_is_valid(plat->power_gpio))
gpio_free(plat->power_gpio);
if (tegra_host->clk_enabled) {
clk_disable_unprepare(pltfm_host->clk);
pm_runtime_put_sync(&pdev->dev);
}
clk_put(pltfm_host->clk);
if (plat->power_off_rail)
unregister_reboot_notifier(&tegra_host->reboot_notify);
sdhci_pltfm_free(pdev);
return 0;
}
static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
};
module_platform_driver(sdhci_tegra_driver);
MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
shoheiworks/Labo-EcCube | tests/class/test/util/Test_Utils.php | 2579 | <?php
/*
* This file is part of EC-CUBE
*
* Copyright(c) 2000-2014 LOCKON CO.,LTD. All Rights Reserved.
*
* http://www.lockon.co.jp/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/**
* General-purpose utility class used by the test cases.
*
* @author Hiroko Tamagawa
* @version $Id: Test_Utils.php 23546 2014-06-12 14:47:59Z shutta $
*/
class Test_Utils
{
/**
* Returns an associative array containing only the specified keys extracted
* from the input. The input associative array is not modified.
*
* @static
* @param input_array the input associative array
* @param map_keys array of keys to include in the result
* @return associative array containing only the specified keys
*/
public static function mapArray($input_array, $map_keys)
{
$output_array = array();
foreach ($map_keys as $index => $map_key) {
$output_array[$map_key] = $input_array[$map_key];
}
return $output_array;
}
/**
* Extracts a single key from each element (an associative array) of the
* input array and returns the extracted values as an array.
* The input array is not modified.
*
* @static
* @param input_array the input array
* @param key the key to extract
* @return array of the values stored under the specified key
*/
public static function mapCols($input_array, $key)
{
$output_array = array();
foreach ($input_array as $data) {
$output_array[] = $data[$key];
}
return $output_array;
}
/**
* Appends the elements of one array to another.
* $orig_array is modified in place.
*
* @static
* @param orig_array the destination array
* @param new_array the array whose elements are appended
*/
public static function array_append(&$orig_array, $new_array)
{
foreach ($new_array as $element) {
$orig_array[] = $element;
}
}
}
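/*
* Illustrative usage sketch (not part of the original test suite), showing
* how the helpers above behave:
*
* $rows = array(
* array('id' => 1, 'name' => 'apple'),
* array('id' => 2, 'name' => 'banana'),
* );
* Test_Utils::mapArray($rows[0], array('name')); // array('name' => 'apple')
* Test_Utils::mapCols($rows, 'name'); // array('apple', 'banana')
* $ids = array(1);
* Test_Utils::array_append($ids, array(2, 3)); // $ids becomes array(1, 2, 3)
*/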
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_mapphone_kexec | net/bluetooth/hci_core.c | 54421 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static DEFINE_RWLOCK(hci_task_lock);
static int enable_smp = 1;
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);
/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);
/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];
/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */
int hci_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&hci_notifier, nb);
}
int hci_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
static void hci_notify(struct hci_dev *hdev, int event)
{
atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
/* If this is the init phase check if the completed command matches
* the last init command, and if not just return.
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
return;
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
wake_up_interruptible(&hdev->req_wait_q);
}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
BT_DBG("%s err 0x%2.2x", hdev->name, err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
wake_up_interruptible(&hdev->req_wait_q);
}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
BT_DBG("%s start", hdev->name);
hdev->req_status = HCI_REQ_PEND;
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
req(hdev, opt);
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
if (signal_pending(current))
return -EINTR;
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
break;
default:
err = -ETIMEDOUT;
break;
}
hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
{
int ret;
if (!test_bit(HCI_UP, &hdev->flags))
return -ENETDOWN;
/* Serialize all requests */
hci_req_lock(hdev);
ret = __hci_request(hdev, req, opt, timeout);
hci_req_unlock(hdev);
return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
BT_DBG("%s %ld", hdev->name, opt);
/* Reset device */
set_bit(HCI_RESET, &hdev->flags);
memset(&hdev->features, 0, sizeof(hdev->features));
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
struct hci_cp_delete_stored_link_key cp;
struct sk_buff *skb;
__le16 param;
__u8 flt_type;
BT_DBG("%s %ld", hdev->name, opt);
/* Driver initialization */
/* Special commands */
while ((skb = skb_dequeue(&hdev->driver_init))) {
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
skb_queue_tail(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
}
skb_queue_purge(&hdev->driver_init);
/* Mandatory initialization */
/* Reset */
if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
set_bit(HCI_RESET, &hdev->flags);
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
/* Set default HCI Flow Control Mode */
if (hdev->dev_type == HCI_BREDR)
hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
else
hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
/* Read HCI Flow Control Mode */
hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
/* Read Data Block Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
#if 0
/* Host buffer size */
{
struct hci_cp_host_buffer_size cp;
cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
cp.sco_mtu = HCI_MAX_SCO_SIZE;
cp.acl_max_pkt = cpu_to_le16(0xffff);
cp.sco_max_pkt = cpu_to_le16(0xffff);
hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
}
#endif
if (hdev->dev_type == HCI_BREDR) {
/* BR-EDR initialization */
/* Read Local Supported Features */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
/* Read BD Address */
hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Read Class of Device */
hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
/* Read Local Name */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
/* Read Voice Setting */
hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
/* Optional initialization */
/* Clear Event Filters */
flt_type = HCI_FLT_CLEAR_ALL;
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
bacpy(&cp.bdaddr, BDADDR_ANY);
cp.delete_all = 1;
hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
sizeof(cp), &cp);
} else {
/* AMP initialization */
/* Connection accept timeout ~5 secs */
param = cpu_to_le16(0x1f40);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
/* Read AMP Info */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
BT_DBG("%s", hdev->name);
/* Read LE buffer size */
hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
__u8 scan = opt;
BT_DBG("%s %x", hdev->name, scan);
/* Inquiry and Page scans */
hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
__u8 auth = opt;
BT_DBG("%s %x", hdev->name, auth);
/* Authentication */
hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
__u8 encrypt = opt;
BT_DBG("%s %x", hdev->name, encrypt);
/* Encryption */
hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
__le16 policy = cpu_to_le16(opt);
BT_DBG("%s %x", hdev->name, policy);
/* Default link policy */
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
struct hci_dev *hdev = NULL;
struct list_head *p;
BT_DBG("%d", index);
if (index < 0)
return NULL;
read_lock(&hci_dev_list_lock);
list_for_each(p, &hci_dev_list) {
struct hci_dev *d = list_entry(p, struct hci_dev, list);
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
}
}
read_unlock(&hci_dev_list_lock);
return hdev;
}
EXPORT_SYMBOL(hci_dev_get);
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *next = cache->list, *e;
BT_DBG("cache %p", cache);
cache->list = NULL;
while ((e = next)) {
next = e->next;
kfree(e);
}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
for (e = cache->list; e; e = e->next)
if (!bacmp(&e->data.bdaddr, bdaddr))
break;
return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *ie;
BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
if (!ie) {
/* Entry not in the cache. Add new one. */
ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
if (!ie)
return;
ie->next = cache->list;
cache->list = ie;
}
memcpy(&ie->data, data, sizeof(*data));
ie->timestamp = jiffies;
cache->timestamp = jiffies;
}
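/*
* Copy up to @num entries from the inquiry cache into @buf as
* struct inquiry_info records; returns the number of entries copied.
*/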
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_info *info = (struct inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
for (e = cache->list; e && copied < num; e = e->next, copied++) {
struct inquiry_data *data = &e->data;
bacpy(&info->bdaddr, &data->bdaddr);
info->pscan_rep_mode = data->pscan_rep_mode;
info->pscan_period_mode = data->pscan_period_mode;
info->pscan_mode = data->pscan_mode;
memcpy(info->dev_class, data->dev_class, 3);
info->clock_offset = data->clock_offset;
info++;
}
BT_DBG("cache %p, copied %d", cache, copied);
return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
struct hci_cp_inquiry cp;
BT_DBG("%s", hdev->name);
if (test_bit(HCI_INQUIRY, &hdev->flags))
return;
/* Start Inquiry */
memcpy(&cp.lap, &ir->lap, 3);
cp.length = ir->length;
cp.num_rsp = ir->num_rsp;
hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0, max_rsp;
long timeo;
__u8 *buf;
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
hdev = hci_dev_get(ir.dev_id);
if (!hdev)
return -ENODEV;
hci_dev_lock_bh(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
hci_dev_unlock_bh(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
if (do_inquiry) {
err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
if (err < 0)
goto done;
}
/* for unlimited number of responses we will use buffer with 255 entries */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto done;
}
hci_dev_lock_bh(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
hci_dev_unlock_bh(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
kfree(buf);
done:
hci_dev_put(hdev);
return err;
}
/* ---- HCI ioctl helpers ---- */
int hci_dev_open(__u16 dev)
{
struct hci_dev *hdev;
int ret = 0;
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
BT_DBG("%s %p", hdev->name, hdev);
hci_req_lock(hdev);
if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
ret = -ERFKILL;
goto done;
}
if (test_bit(HCI_UP, &hdev->flags)) {
ret = -EALREADY;
goto done;
}
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
hdev->init_last_cmd = 0;
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
if (lmp_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
clear_bit(HCI_INIT, &hdev->flags);
}
if (!ret) {
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
if (!test_bit(HCI_SETUP, &hdev->flags) &&
hdev->dev_type == HCI_BREDR) {
hci_dev_lock_bh(hdev);
mgmt_powered(hdev->id, 1);
hci_dev_unlock_bh(hdev);
}
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
tasklet_kill(&hdev->cmd_task);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
if (hdev->flush)
hdev->flush(hdev);
if (hdev->sent_cmd) {
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
hdev->close(hdev);
hdev->flags = 0;
}
done:
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
unsigned long keepflags = 0;
BT_DBG("%s %p", hdev->name, hdev);
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
del_timer_sync(&hdev->cmd_timer);
hci_req_unlock(hdev);
return 0;
}
/* Kill RX and TX tasks */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock_bh(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
if (hdev->flush)
hdev->flush(hdev);
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
/* Kill cmd task */
tasklet_kill(&hdev->cmd_task);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->raw_q);
/* Drop last sent command */
if (hdev->sent_cmd) {
del_timer_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
if (hdev->dev_type == HCI_BREDR) {
hci_dev_lock_bh(hdev);
mgmt_powered(hdev->id, 0);
hci_dev_unlock_bh(hdev);
}
/* Clear only non-persistent flags */
if (test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_MGMT, &keepflags);
if (test_bit(HCI_LINK_KEYS, &hdev->flags))
set_bit(HCI_LINK_KEYS, &keepflags);
if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
set_bit(HCI_DEBUG_KEYS, &keepflags);
hdev->flags = keepflags;
hci_req_unlock(hdev);
hci_dev_put(hdev);
return 0;
}
int hci_dev_close(__u16 dev)
{
struct hci_dev *hdev;
int err;
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
err = hci_dev_do_close(hdev);
hci_dev_put(hdev);
return err;
}
int hci_dev_reset(__u16 dev)
{
struct hci_dev *hdev;
int ret = 0;
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
hci_req_lock(hdev);
tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock_bh(hdev);
if (hdev->flush)
hdev->flush(hdev);
atomic_set(&hdev->cmd_cnt, 1);
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_request(hdev, hci_reset_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
struct hci_dev *hdev;
int ret = 0;
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
hci_dev_put(hdev);
return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
int err = 0;
if (copy_from_user(&dr, arg, sizeof(dr)))
return -EFAULT;
hdev = hci_dev_get(dr.dev_id);
if (!hdev)
return -ENODEV;
switch (cmd) {
case HCISETAUTH:
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETENCRYPT:
if (!lmp_encrypt_capable(hdev)) {
err = -EOPNOTSUPP;
break;
}
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
if (err)
break;
}
err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETSCAN:
err = hci_request(hdev, hci_scan_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETLINKPOL:
err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
break;
case HCISETLINKMODE:
hdev->link_mode = ((__u16) dr.dev_opt) &
(HCI_LM_MASTER | HCI_LM_ACCEPT);
break;
case HCISETPTYPE:
hdev->pkt_type = (__u16) dr.dev_opt;
break;
case HCISETACLMTU:
hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
break;
case HCISETSCOMTU:
hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
break;
default:
err = -EINVAL;
break;
}
hci_dev_put(hdev);
return err;
}
int hci_get_dev_list(void __user *arg)
{
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
if (get_user(dev_num, (__u16 __user *) arg))
return -EFAULT;
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
return -EINVAL;
size = sizeof(*dl) + dev_num * sizeof(*dr);
dl = kzalloc(size, GFP_KERNEL);
if (!dl)
return -ENOMEM;
dr = dl->dev_req;
read_lock_bh(&hci_dev_list_lock);
list_for_each(p, &hci_dev_list) {
struct hci_dev *hdev;
hdev = list_entry(p, struct hci_dev, list);
hci_del_off_timer(hdev);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
if (++n >= dev_num)
break;
}
read_unlock_bh(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
err = copy_to_user(arg, dl, size);
kfree(dl);
return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
struct hci_dev *hdev;
struct hci_dev_info di;
int err = 0;
if (copy_from_user(&di, arg, sizeof(di)))
return -EFAULT;
hdev = hci_dev_get(di.dev_id);
if (!hdev)
return -ENODEV;
hci_del_off_timer(hdev);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
di.acl_mtu = hdev->acl_mtu;
di.acl_pkts = hdev->acl_pkts;
di.sco_mtu = hdev->sco_mtu;
di.sco_pkts = hdev->sco_pkts;
di.link_policy = hdev->link_policy;
di.link_mode = hdev->link_mode;
memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
memcpy(&di.features, &hdev->features, sizeof(di.features));
if (copy_to_user(arg, &di, sizeof(di)))
err = -EFAULT;
hci_dev_put(hdev);
return err;
}
/* ---- Interface to HCI drivers ---- */
static int hci_rfkill_set_block(void *data, bool blocked)
{
struct hci_dev *hdev = data;
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (!blocked)
return 0;
hci_dev_do_close(hdev);
return 0;
}
static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
struct hci_dev *hdev;
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;
skb_queue_head_init(&hdev->driver_init);
return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);
/* will free via device release */
put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
int err;
BT_DBG("%s", hdev->name);
err = hci_dev_open(hdev->id);
if (err && err != -EALREADY)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
hdev->dev_type == HCI_BREDR)
mod_timer(&hdev->off_timer,
jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
hdev->dev_type == HCI_BREDR)
mgmt_index_added(hdev->id);
}
static void hci_power_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
BT_DBG("%s", hdev->name);
hci_dev_close(hdev->id);
}
static void hci_auto_off(unsigned long data)
{
struct hci_dev *hdev = (struct hci_dev *) data;
BT_DBG("%s", hdev->name);
clear_bit(HCI_AUTO_OFF, &hdev->flags);
queue_work(hdev->workqueue, &hdev->power_off);
}
void hci_del_off_timer(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
clear_bit(HCI_AUTO_OFF, &hdev->flags);
del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
struct list_head *p, *n;
list_for_each_safe(p, n, &hdev->uuids) {
struct bt_uuid *uuid;
uuid = list_entry(p, struct bt_uuid, list);
list_del(p);
kfree(uuid);
}
return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
struct list_head *p, *n;
list_for_each_safe(p, n, &hdev->link_keys) {
struct link_key *key;
key = list_entry(p, struct link_key, list);
list_del(p);
kfree(key);
}
return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct list_head *p;
list_for_each(p, &hdev->link_keys) {
struct link_key *k;
k = list_entry(p, struct link_key, list);
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
}
return NULL;
}
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
struct list_head *p;
list_for_each(p, &hdev->link_keys) {
struct link_key *k;
struct key_master_id *id;
k = list_entry(p, struct link_key, list);
if (k->key_type != KEY_TYPE_LTK)
continue;
if (k->dlen != sizeof(*id))
continue;
id = (void *) &k->data;
if (id->ediv == ediv &&
(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
return k;
}
return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type)
{
struct list_head *p;
list_for_each(p, &hdev->link_keys) {
struct link_key *k;
k = list_entry(p, struct link_key, list);
if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
return k;
}
return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
struct hci_conn *conn;
u8 old_key_type;
u8 bonded = 0;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
old_key_type = old_key->key_type;
key = old_key;
} else {
old_key_type = 0xff;
key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
list_add(&key->list, &hdev->link_keys);
}
BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, val, 16);
key->auth = 0x01;
key->key_type = type;
key->pin_len = pin_len;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
/* Store the link key persistently if one of the following is true:
* 1. the remote side is using dedicated bonding since in that case
* also the local requirements are set to dedicated bonding
* 2. the local side had dedicated bonding as a requirement
* 3. this is a legacy link key
* 4. this is a changed combination key and there was a previously
* stored one
* If none of the above match only keep the link key around for
* this connection and set the temporary flag for the device.
*/
if (conn) {
if ((conn->remote_auth > 0x01) ||
(conn->auth_initiator && conn->auth_type > 0x01) ||
(key->key_type < 0x03) ||
(key->key_type == 0x06 && old_key_type != 0xff))
bonded = 1;
}
if (new_key)
mgmt_new_key(hdev->id, key, bonded);
if (type == 0x06)
key->key_type = old_key_type;
return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
u8 addr_type, u8 key_size, u8 auth,
__le16 ediv, u8 rand[8], u8 ltk[16])
{
struct link_key *key, *old_key;
struct key_master_id *id;
BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
batostr(bdaddr), addr_type);
old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
if (old_key) {
key = old_key;
} else {
key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
if (!key)
return -ENOMEM;
list_add(&key->list, &hdev->link_keys);
}
key->dlen = sizeof(*id);
bacpy(&key->bdaddr, bdaddr);
key->addr_type = addr_type;
memcpy(key->val, ltk, sizeof(key->val));
key->key_type = KEY_TYPE_LTK;
key->pin_len = key_size;
key->auth = auth;
id = (void *) &key->data;
id->ediv = ediv;
memcpy(id->rand, rand, sizeof(id->rand));
if (new_key)
mgmt_new_key(hdev->id, key, auth & 0x01);
return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct link_key *key;
key = hci_find_link_key(hdev, bdaddr);
if (!key)
return -ENOENT;
BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
list_del(&key->list);
kfree(key);
return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
struct hci_dev *hdev = (void *) arg;
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
clear_bit(HCI_RESET, &hdev->flags);
tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr)
{
struct oob_data *data;
list_for_each_entry(data, &hdev->remote_oob_data, list)
if (bacmp(bdaddr, &data->bdaddr) == 0)
return data;
return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct oob_data *data;
data = hci_find_remote_oob_data(hdev, bdaddr);
if (!data)
return -ENOENT;
BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
list_del(&data->list);
kfree(data);
return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
struct oob_data *data, *n;
list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
list_del(&data->list);
kfree(data);
}
return 0;
}
static void hci_adv_clear(unsigned long arg)
{
struct hci_dev *hdev = (void *) arg;
hci_adv_entries_clear(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
struct list_head *p, *n;
BT_DBG("");
write_lock_bh(&hdev->adv_entries_lock);
list_for_each_safe(p, n, &hdev->adv_entries) {
struct adv_entry *entry;
entry = list_entry(p, struct adv_entry, list);
list_del(p);
kfree(entry);
}
write_unlock_bh(&hdev->adv_entries_lock);
return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct list_head *p;
struct adv_entry *res = NULL;
BT_DBG("");
read_lock_bh(&hdev->adv_entries_lock);
list_for_each(p, &hdev->adv_entries) {
struct adv_entry *entry;
entry = list_entry(p, struct adv_entry, list);
if (bacmp(bdaddr, &entry->bdaddr) == 0) {
res = entry;
goto out;
}
}
out:
read_unlock_bh(&hdev->adv_entries_lock);
return res;
}
static inline int is_connectable_adv(u8 evt_type)
{
if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
return 1;
return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
u8 *randomizer)
{
struct oob_data *data;
data = hci_find_remote_oob_data(hdev, bdaddr);
if (!data) {
data = kmalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
bacpy(&data->bdaddr, bdaddr);
list_add(&data->list, &hdev->remote_oob_data);
}
memcpy(data->hash, hash, sizeof(data->hash));
memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
return 0;
}
int hci_add_adv_entry(struct hci_dev *hdev,
struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;
u8 flags = 0;
int i;
BT_DBG("");
if (!is_connectable_adv(ev->evt_type))
return -EINVAL;
if (ev->data && ev->length) {
for (i = 0; (i + 2) < ev->length; i++)
if (ev->data[i+1] == 0x01) {
flags = ev->data[i+2];
BT_DBG("flags: %2.2x", flags);
break;
} else {
i += ev->data[i];
}
}
entry = hci_find_adv_entry(hdev, &ev->bdaddr);
/* Only new entries should be added to adv_entries. So, if
* bdaddr was found, don't add it. */
if (entry) {
entry->flags = flags;
return 0;
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
bacpy(&entry->bdaddr, &ev->bdaddr);
entry->bdaddr_type = ev->bdaddr_type;
entry->flags = flags;
write_lock(&hdev->adv_entries_lock);
list_add(&entry->list, &hdev->adv_entries);
write_unlock(&hdev->adv_entries_lock);
return 0;
}
static struct crypto_blkcipher *alloc_cypher(void)
{
if (enable_smp)
return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
return ERR_PTR(-ENOTSUPP);
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
int i, id;
BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
hdev->bus, hdev->owner);
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
write_lock_bh(&hci_dev_list_lock);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
if (list_entry(p, struct hci_dev, list)->id != id)
break;
head = p; id++;
}
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
list_add(&hdev->list, head);
atomic_set(&hdev->refcnt, 1);
spin_lock_init(&hdev->lock);
hdev->flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
(unsigned long) hdev);
setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
(unsigned long) hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;
init_waitqueue_head(&hdev->req_wait_q);
mutex_init(&hdev->req_lock);
inquiry_cache_init(hdev);
hci_conn_hash_init(hdev);
hci_chan_list_init(hdev);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
rwlock_init(&hdev->adv_entries_lock);
setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->power_off, hci_power_off);
setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
write_unlock_bh(&hci_dev_list_lock);
hdev->workqueue = create_singlethread_workqueue(hdev->name);
if (!hdev->workqueue)
goto nomem;
hdev->tfm = alloc_cypher();
if (IS_ERR(hdev->tfm))
BT_INFO("Failed to load transform for ecb(aes): %ld",
PTR_ERR(hdev->tfm));
hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
hdev->rfkill = NULL;
}
}
set_bit(HCI_AUTO_OFF, &hdev->flags);
set_bit(HCI_SETUP, &hdev->flags);
queue_work(hdev->workqueue, &hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
return id;
nomem:
write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock_bh(&hci_dev_list_lock);
return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
int i;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock_bh(&hci_dev_list_lock);
hci_dev_do_close(hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
!test_bit(HCI_SETUP, &hdev->flags) &&
hdev->dev_type == HCI_BREDR) {
hci_dev_lock_bh(hdev);
mgmt_index_removed(hdev->id);
hci_dev_unlock_bh(hdev);
}
if (!IS_ERR(hdev->tfm))
crypto_free_blkcipher(hdev->tfm);
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
rfkill_unregister(hdev->rfkill);
rfkill_destroy(hdev->rfkill);
}
hci_unregister_sysfs(hdev);
/* Disable all timers */
hci_del_off_timer(hdev);
del_timer(&hdev->adv_timer);
del_timer(&hdev->cmd_timer);
del_timer(&hdev->disco_timer);
del_timer(&hdev->disco_le_timer);
destroy_workqueue(hdev->workqueue);
hci_dev_lock_bh(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock_bh(hdev);
__hci_dev_put(hdev);
return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
hci_notify(hdev, HCI_DEV_SUSPEND);
return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
hci_notify(hdev, HCI_DEV_RESUME);
return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
/* Incoming skb */
bt_cb(skb)->incoming = 1;
/* Time stamp */
__net_timestamp(skb);
/* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
tasklet_schedule(&hdev->rx_task);
return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
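/*
* Reassemble a possibly fragmented HCI packet of the given type into
* hdev->reassembly[index]. Once a frame is complete it is handed to
* hci_recv_frame(). Returns the number of input bytes left unconsumed
* (0 if everything was absorbed into a still-incomplete frame) or a
* negative error code.
*/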
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
int count, __u8 index)
{
int len = 0;
int hlen = 0;
int remain = count;
struct sk_buff *skb;
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
if (!skb) {
switch (type) {
case HCI_ACLDATA_PKT:
len = HCI_MAX_FRAME_SIZE;
hlen = HCI_ACL_HDR_SIZE;
break;
case HCI_EVENT_PKT:
len = HCI_MAX_EVENT_SIZE;
hlen = HCI_EVENT_HDR_SIZE;
break;
case HCI_SCODATA_PKT:
len = HCI_MAX_SCO_SIZE;
hlen = HCI_SCO_HDR_SIZE;
break;
}
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
scb = (void *) skb->cb;
scb->expect = hlen;
scb->pkt_type = type;
skb->dev = (void *) hdev;
hdev->reassembly[index] = skb;
}
while (count) {
scb = (void *) skb->cb;
len = min(scb->expect, (__u16)count);
memcpy(skb_put(skb, len), data, len);
count -= len;
data += len;
scb->expect -= len;
remain = count;
switch (type) {
case HCI_EVENT_PKT:
if (skb->len == HCI_EVENT_HDR_SIZE) {
struct hci_event_hdr *h = hci_event_hdr(skb);
scb->expect = h->plen;
if (skb_tailroom(skb) < scb->expect) {
kfree_skb(skb);
hdev->reassembly[index] = NULL;
return -ENOMEM;
}
}
break;
case HCI_ACLDATA_PKT:
if (skb->len == HCI_ACL_HDR_SIZE) {
struct hci_acl_hdr *h = hci_acl_hdr(skb);
scb->expect = __le16_to_cpu(h->dlen);
if (skb_tailroom(skb) < scb->expect) {
kfree_skb(skb);
hdev->reassembly[index] = NULL;
return -ENOMEM;
}
}
break;
case HCI_SCODATA_PKT:
if (skb->len == HCI_SCO_HDR_SIZE) {
struct hci_sco_hdr *h = hci_sco_hdr(skb);
scb->expect = h->dlen;
if (skb_tailroom(skb) < scb->expect) {
kfree_skb(skb);
hdev->reassembly[index] = NULL;
return -ENOMEM;
}
}
break;
}
if (scb->expect == 0) {
/* Complete frame */
bt_cb(skb)->pkt_type = type;
hci_recv_frame(skb);
hdev->reassembly[index] = NULL;
return remain;
}
}
return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
int rem = 0;
if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
return -EILSEQ;
while (count) {
rem = hci_reassembly(hdev, type, data, count, type - 1);
if (rem < 0)
return rem;
data += (count - rem);
count = rem;
};
return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0
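/*
* Reassembly for transports that deliver HCI data as a raw byte stream
* (e.g. UART/H4): the first byte of each frame carries the packet type,
* the remainder is fed through hci_reassembly() using the single
* STREAM_REASSEMBLY slot.
*/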
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
int type;
int rem = 0;
while (count) {
struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
if (!skb) {
struct { char type; } *pkt;
/* Start of the frame */
pkt = data;
type = pkt->type;
data++;
count--;
} else
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
STREAM_REASSEMBLY);
if (rem < 0)
return rem;
data += (count - rem);
count = rem;
};
return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */
/* Register/Unregister protocols.
* hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
int err = 0;
BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
write_lock_bh(&hci_task_lock);
if (!hci_proto[hp->id])
hci_proto[hp->id] = hp;
else
err = -EEXIST;
write_unlock_bh(&hci_task_lock);
return err;
}
EXPORT_SYMBOL(hci_register_proto);
int hci_unregister_proto(struct hci_proto *hp)
{
int err = 0;
BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
write_lock_bh(&hci_task_lock);
if (hci_proto[hp->id])
hci_proto[hp->id] = NULL;
else
err = -ENOENT;
write_unlock_bh(&hci_task_lock);
return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
write_lock_bh(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
write_unlock_bh(&hci_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
write_lock_bh(&hci_cb_list_lock);
list_del(&cb->list);
write_unlock_bh(&hci_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
int hci_register_amp(struct amp_mgr_cb *cb)
{
BT_DBG("%p", cb);
write_lock_bh(&_mgr_cb_list_lock);
list_add(&cb->list, &_mgr_cb_list);
write_unlock_bh(&_mgr_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_register_amp);
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
BT_DBG("%p", cb);
write_lock_bh(&_mgr_cb_list_lock);
list_del(&cb->list);
write_unlock_bh(&_mgr_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
struct sk_buff *skb)
{
struct amp_mgr_cb *cb;
BT_DBG("opcode 0x%x", opcode);
read_lock_bh(&_mgr_cb_list_lock);
list_for_each_entry(cb, &_mgr_cb_list, list) {
if (cb->amp_cmd_complete_event)
cb->amp_cmd_complete_event(hdev, opcode, skb);
}
read_unlock_bh(&_mgr_cb_list_lock);
}
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
struct amp_mgr_cb *cb;
BT_DBG("opcode 0x%x, status %d", opcode, status);
read_lock_bh(&_mgr_cb_list_lock);
list_for_each_entry(cb, &_mgr_cb_list, list) {
if (cb->amp_cmd_status_event)
cb->amp_cmd_status_event(hdev, opcode, status);
}
read_unlock_bh(&_mgr_cb_list_lock);
}
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
struct sk_buff *skb)
{
struct amp_mgr_cb *cb;
BT_DBG("ev_code 0x%x", ev_code);
read_lock_bh(&_mgr_cb_list_lock);
list_for_each_entry(cb, &_mgr_cb_list, list) {
if (cb->amp_event)
cb->amp_event(hdev, ev_code, skb);
}
read_unlock_bh(&_mgr_cb_list_lock);
}
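/* Hand a single frame to the driver: copy it to monitoring sockets when the
 * device is in promiscuous mode, drop the skb owner and call hdev->send(). */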
static int hci_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev) {
kfree_skb(skb);
return -ENODEV;
}
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
if (atomic_read(&hdev->promisc)) {
/* Time stamp */
__net_timestamp(skb);
hci_send_to_sock(hdev, skb, NULL);
}
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
hci_notify(hdev, HCI_DEV_WRITE);
return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
BT_ERR("%s no memory for command", hdev->name);
return -ENOMEM;
}
hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
hdr->plen = plen;
if (plen)
memcpy(skb_put(skb, plen), param, plen);
BT_DBG("skb len %d", skb->len);
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
if (test_bit(HCI_INIT, &hdev->flags))
hdev->init_last_cmd = opcode;
skb_queue_tail(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
struct hci_command_hdr *hdr;
if (!hdev->sent_cmd)
return NULL;
hdr = (void *) hdev->sent_cmd->data;
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
struct hci_acl_hdr *hdr;
int len = skb->len;
skb_push(skb, HCI_ACL_HDR_SIZE);
skb_reset_transport_header(skb);
hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
hdr->dlen = cpu_to_le16(len);
}
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
if (hdev->dev_type == HCI_BREDR)
hci_add_acl_hdr(skb, conn->handle, flags);
else
hci_add_acl_hdr(skb, chan->ll_handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
skb_queue_tail(&conn->data_q, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
spin_lock_bh(&conn->data_q.lock);
__skb_queue_tail(&conn->data_q, skb);
flags &= ~ACL_PB_MASK;
flags |= ACL_CONT;
do {
skb = list; list = list->next;
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
__skb_queue_tail(&conn->data_q, skb);
} while (list);
spin_unlock_bh(&conn->data_q.lock);
}
tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
struct hci_dev *hdev = conn->hdev;
struct hci_sco_hdr hdr;
BT_DBG("%s len %d", hdev->name, skb->len);
hdr.handle = cpu_to_le16(conn->handle);
hdr.dlen = skb->len;
skb_push(skb, HCI_SCO_HDR_SIZE);
skb_reset_transport_header(skb);
memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL;
int num = 0, min = ~0;
struct list_head *p;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
list_for_each(p, &h->list) {
struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
continue;
num++;
if (c->sent < min) {
min = c->sent;
conn = c;
}
}
if (conn) {
int cnt, q;
switch (conn->type) {
case ACL_LINK:
cnt = hdev->acl_cnt;
break;
case SCO_LINK:
case ESCO_LINK:
cnt = hdev->sco_cnt;
break;
case LE_LINK:
cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
break;
default:
cnt = 0;
BT_ERR("Unknown link type");
}
q = cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
BT_DBG("conn %p quote %d", conn, *quote);
return conn;
}
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
/* Kill stalled connections */
list_for_each(p, &h->list) {
c = list_entry(p, struct hci_conn, list);
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (hdev->acl_cnt <= 0 &&
time_after(jiffies, hdev->acl_last_tx + HZ * 45))
hci_link_tx_to(hdev, ACL_LINK);
}
while (hdev->acl_cnt > 0 &&
(conn = hci_low_sent(hdev, ACL_LINK, "e))) {
while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
int count = 1;
BT_DBG("skb %p len %d", skb, skb->len);
if (hdev->flow_ctl_mode ==
HCI_BLOCK_BASED_FLOW_CTL_MODE)
/* Calculate count of blocks used by
* this packet
*/
count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
hdev->data_block_len) + 1;
if (count > hdev->acl_cnt)
return;
hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt -= count;
quote -= count;
conn->sent += count;
}
}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
BT_DBG("%s", hdev->name);
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
conn->sent++;
if (conn->sent == ~0)
conn->sent = 0;
}
}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
BT_DBG("%s", hdev->name);
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
conn->sent++;
if (conn->sent == ~0)
conn->sent = 0;
}
}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote, cnt;
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
time_after(jiffies, hdev->le_last_tx + HZ * 45))
hci_link_tx_to(hdev, LE_LINK);
}
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
while (cnt && (conn = hci_low_sent(hdev, LE_LINK, "e))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
conn->sent++;
}
}
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
}
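/* HCI TX task: drain the per-connection ACL, SCO, eSCO and LE queues and
 * then flush any frames waiting on the raw queue. */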
static void hci_tx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
read_lock(&hci_task_lock);
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
hci_sched_acl(hdev);
hci_sched_sco(hdev);
hci_sched_esco(hdev);
hci_sched_le(hdev);
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
__u16 handle, flags;
skb_pull(skb, HCI_ACL_HDR_SIZE);
handle = __le16_to_cpu(hdr->handle);
flags = hci_flags(handle);
handle = hci_handle(handle);
BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
hdev->stat.acl_rx++;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (conn) {
register struct hci_proto *hp;
hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
/* Send to upper protocol */
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->recv_acldata) {
hp->recv_acldata(conn, skb, flags);
return;
}
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
__u16 handle;
skb_pull(skb, HCI_SCO_HDR_SIZE);
handle = __le16_to_cpu(hdr->handle);
BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
hdev->stat.sco_rx++;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (conn) {
register struct hci_proto *hp;
/* Send to upper protocol */
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->recv_scodata) {
hp->recv_scodata(conn, skb);
return;
}
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
}
kfree_skb(skb);
}
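/* HCI RX task: hand queued frames to the socket layer (in promiscuous mode)
 * and dispatch them to the event, ACL and SCO handlers. Everything is
 * dropped in raw mode, and data packets are dropped while initializing. */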
static void hci_rx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
read_lock(&hci_task_lock);
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
hci_send_to_sock(hdev, skb, NULL);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
kfree_skb(skb);
continue;
}
if (test_bit(HCI_INIT, &hdev->flags)) {
/* Don't process data packets in this state. */
switch (bt_cb(skb)->pkt_type) {
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
kfree_skb(skb);
continue;
}
}
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
hci_event_packet(hdev, skb);
break;
case HCI_ACLDATA_PKT:
BT_DBG("%s ACL data packet", hdev->name);
hci_acldata_packet(hdev, skb);
break;
case HCI_SCODATA_PKT:
BT_DBG("%s SCO data packet", hdev->name);
hci_scodata_packet(hdev, skb);
break;
default:
kfree_skb(skb);
break;
}
}
read_unlock(&hci_task_lock);
}
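/* HCI command scheduler: send the next queued command when the controller
 * has command credits, keep a clone in hdev->sent_cmd for later inspection
 * and arm the command timeout timer. */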
static void hci_cmd_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
/* Send queued commands */
if (atomic_read(&hdev->cmd_cnt)) {
skb = skb_dequeue(&hdev->cmd_q);
if (!skb)
return;
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
mod_timer(&hdev->cmd_timer,
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
}
}
}
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");
| gpl-2.0 |
abhinay100/forma_app | templates/doceboce405/style/lms.css | 5642 | /*
DOCEBO - The E-Learning Suite
http://www.docebo.com
license: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
*/
.layout_header{padding:.4em 1em;background:#ffffff;}
.layout_header .left_logo{margin-top:5px;margin-left:10px;vertical-align:middle;}
.layout_menu_over{zoom:1;}
#lms_main_container{background:#ffffff;padding-top:1em;}
.layout_colum_container{padding-top:1em;}
.layout_footer{padding:.4em 1em;/*background:url('../images/back_footer.png') repeat-x 0 0 #ffffff;*/background:#eeeeee;}
.layout_footer img{vertical-align:middle;}
.powered_by{text-align:right;}
/* Lms interface management components styles */
.lms_management{}
.lms_management .area{border:dashed 1px #666; background-color:#e9e9e9; margin:3px; padding:0px;}
.lms_management .area .area_block{border:1px solid #000; background-color:#F4FAFF;}
#middlearea .yui-nav li{position:relative;margin-right:0.6em;}
.admmenu{position:relative;}
#middlearea .yui-nav li b,.num_notify{position:absolute;color:#fff;text-align:center;font-size:0.9em;padding-top:1px;line-height:1.2em;text-decoration:none;
top:-10px;right:-8px;height:13px;width:12px;background:#ee1111;border:0px none;
-webkit-border-radius: 4px 4px 4px 0;-moz-border-radius: 4px 4px 4px 0;border-radius: 4px 4px 4px 0;
-webkit-box-shadow: #666 0px 2px 3px;-moz-box-shadow: #666 0px 2px 3px;box-shadow: #666 0px 2px 3px;
/*for the crappy browser we use a background image*/
_top:-11px;_right:-10px;_height:18px;_width:16px;
_background:url(../images/standard/updates.png) 0 0 no-repeat transparent;}
#middlearea .yui-nav li.selected b{_background-position:-16px 0;}
/* Course home list */
.edition_container, .dash-course{position:relative;clear:none;margin:.6em 1em;padding:0 110px 1em 0;border-bottom:1px solid #cccccc; min-height: 72px; }
.dash-course img.clogo{display:inline-block;border:2px solid #cccccc;max-width:100px;max-height:70px;padding:2px;margin:0px;}
.dash-course img.cnologo{padding:5px 20px;background:#ffffff;}
.dash-course h2{color:#003d6b;}
.dash-course h2 a{color:#003d6b;}
.course_support_info {padding:.2em .4em;}
.status_subscribed h2{background:#efefef;}
.status_subscribed h2 a{}
.status_begin h2{}
.status_begin h2 a{}
div.logo_container{width:100px; text-align:center; padding:0px; margin:0 1em .6em 0;float:left;/*position:absolute;*/}
div.info_container{padding-left:116px;}
/* Kb navigation interface */
#kb_folder_nav{margin:0 0 1em;}
div.kb_folder_box{margin:1em 0em;padding:0.4em;background:#efefef;}
div.kb_folder_box ul{padding:0; margin:0;}
div.kb_folder_box ul li{float:left;margin:0 2em;}
div.kb_folder_box ul li a{font-weight:bold;}
div.kb_folder_box ul li div{line-height:1.8em;padding-top:8px;}
div.kb_folder_box ul li ul.subfolders{padding:0 0 0 1em;line-height:1.4em;}
div.kb_folder_box ul li ul.subfolders li{float:none;font-size:83%; margin:0; padding:0;}
div.kb_folder_box ul li ul.subfolders li a{font-weight:normal;}
div.kb_folder_box ul li span{font-size:83%;padding-left:0.3em;}
p.section_title{margin-top:0.8em;margin-bottom:1em;border-bottom:1px solid #cccccc;padding-bottom:0.2em;padding-right:0.2em;font-weight:bold;}
.brd_right{border-right:1px solid #cccccc;}
div.kb_res_list{}
div.kb_res_list p{padding-left:1em;margin-top:0.4em;}
div.kb_res_list div.line{margin-top:1em;margin-bottom:2em;padding-bottom:0.5em;border-bottom:1px solid #cccccc;}
div.kb_res_list a{font-weight:bold;font-size:108%;}
div.kb_res_list div.line div.tags{font-size:90%;margin-top:0.6em;}
.games_chart{float:right;text-align:center;}
.games_chart #standings_chart{width:400px;height:250px;}
.edition_container .edition_subscribe, .catalog_action{min-width:90px;position:absolute;right:5px;top:10px;text-align:center;background:#ffffff;font-size:90%;max-width: 100px;}
.subscribed{padding:2px 4px;background:#eeeeee;border:1px solid #666666;}
.edition_container .edition_subscribe a p, .can_subscribe{padding:2px 4px;background:#ddffdd;border:1px solid #009900;}
.cannot_subscribe{padding:2px 4px;background:#ffffcc;border:1px solid #ffcc33;}
#statistic_chart{width:80%;height:250px;margin:0 auto;}
#tag_cloud{margin:0 1em 1em;}
#tag_cloud .yui-content{background:#ffffff;}
div.label_container{border:1px solid #9a9a9a;display:inline;float:left;font-weight:normal;height:80px;margin:10px 2% 0px 2%;position:relative;text-decoration:none;width:29%;overflow:hidden;}
span.label_image_cont{width:72px;height:98%;position:absolute;text-align:center;}
span.label_info_con{display:block;padding:4px 4px 4px 76px;}
img.label_image{max-width:64px;padding:4px;}
span.label_title{font-size:108%;font-weight:bold;margin:0.83em 0px;}
span.label_description{border-top:2px solid #446dd6;display:block;margin-top:5px;padding-top:5px;font-size:90%;}
div.coursepath_container{margin-bottom:15px;}
span.expand_path_info{font-weight:bold;color:#003d6b;padding:5px;}
span.coursepath_percentage{font-weight:bold;color:#003d6b;}
td.course_type{padding-left:15px;padding-right:15px;text-align:right;width:80px;color:#003d6b;}
div.coursepath_description{background-color:#efefef;font-size:90%;}
div.coursepath_description div.textof{padding:10px;}
div.coursepath_action{float:left;margin-top:5px;margin-bottom:5px;}
div.percentage_cont{border:1px solid #cccccc;padding:5px;}
div.coursepath_details{background-color:#f7f7f7;border-top:3px solid #d1d1d1;border-left:1px solid #d1d1d1;border-right:1px solid #d1d1d1;border-bottom:1px solid #d1d1d1;padding:8px 4px;}
.yui-skin-sam .yui-dt .forum_action td {text-align:right;}
.tabs_filter {float:right; top:-15px; z-index: 1; position: relative;} | gpl-2.0 |
hschauhan/xvisor-x86_64 | drivers/gpio/gpio-mxc.c | 16616 | /*
* Copyright (C) 2014 Institut de Recherche Technologique SystemX and OpenWide.
* All rights reserved.
*
* MXC GPIO support. (c) 2008 Daniel Mack <[email protected]>
* Copyright 2008 Juergen Beisert, [email protected]
*
* Based on code from Freescale,
* Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* @file gpio-mxc.c
* @author Jimmy Durand Wesolowski ([email protected])
* @brief MXC GPIO support
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <asm-generic/bug.h>
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
IMX31_GPIO, /* runs on i.mx31 */
IMX35_GPIO, /* runs on all other i.mx */
};
/* device type dependent stuff */
struct mxc_gpio_hwdata {
unsigned dr_reg;
unsigned gdir_reg;
unsigned psr_reg;
unsigned icr1_reg;
unsigned icr2_reg;
unsigned imr_reg;
unsigned isr_reg;
int edge_sel_reg;
unsigned low_level;
unsigned high_level;
unsigned rise_edge;
unsigned fall_edge;
};
struct mxc_gpio_port {
struct list_head node;
void __iomem* base;
u32 irq;
u32 irq_high;
struct vmm_host_irqdomain *domain;
struct gpio_chip gc;
u32 both_edges;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
.dr_reg = 0x1c,
.gdir_reg = 0x00,
.psr_reg = 0x24,
.icr1_reg = 0x28,
.icr2_reg = 0x2c,
.imr_reg = 0x30,
.isr_reg = 0x34,
.edge_sel_reg = -EINVAL,
.low_level = 0x03,
.high_level = 0x02,
.rise_edge = 0x00,
.fall_edge = 0x01,
};
static struct mxc_gpio_hwdata imx31_gpio_hwdata = {
.dr_reg = 0x00,
.gdir_reg = 0x04,
.psr_reg = 0x08,
.icr1_reg = 0x0c,
.icr2_reg = 0x10,
.imr_reg = 0x14,
.isr_reg = 0x18,
.edge_sel_reg = -EINVAL,
.low_level = 0x00,
.high_level = 0x01,
.rise_edge = 0x02,
.fall_edge = 0x03,
};
static struct mxc_gpio_hwdata imx35_gpio_hwdata = {
.dr_reg = 0x00,
.gdir_reg = 0x04,
.psr_reg = 0x08,
.icr1_reg = 0x0c,
.icr2_reg = 0x10,
.imr_reg = 0x14,
.isr_reg = 0x18,
.edge_sel_reg = 0x1c,
.low_level = 0x00,
.high_level = 0x01,
.rise_edge = 0x02,
.fall_edge = 0x03,
};
static enum mxc_gpio_hwtype mxc_gpio_hwtype;
static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
#define GPIO_DR (mxc_gpio_hwdata->dr_reg)
#define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg)
#define GPIO_PSR (mxc_gpio_hwdata->psr_reg)
#define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg)
#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
#define GPIO_EDGE_SEL (mxc_gpio_hwdata->edge_sel_reg)
#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
#define GPIO_INT_BOTH_EDGES 0x4
static struct platform_device_id mxc_gpio_devtype[] = {
{
.name = "imx1-gpio",
.driver_data = IMX1_GPIO,
}, {
.name = "imx21-gpio",
.driver_data = IMX21_GPIO,
}, {
.name = "imx31-gpio",
.driver_data = IMX31_GPIO,
}, {
.name = "imx35-gpio",
.driver_data = IMX35_GPIO,
}, {
/* sentinel */
}
};
static const struct vmm_devtree_nodeid mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
{ .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
{ .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
{ .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
/*
* MX2 has one interrupt *for all* gpio ports. The list is used
* to save the references to all ports, so that mx2_gpio_irq_handler
* can walk through all interrupt status registers.
*/
static LIST_HEAD(mxc_gpio_ports);
/* Note: This driver assumes 32 GPIOs are handled in one register */
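/* Program the trigger type of a single GPIO interrupt. For both-edge
 * triggers on controllers without an edge-select register, the level is
 * derived from the current pin value and flipped later by mxc_flip_edge(). */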
static int gpio_set_irq_type(struct vmm_host_irq *d, u32 type)
{
struct mxc_gpio_port *port = vmm_host_irq_get_chip_data(d);
u32 bit, val;
u32 gpio_idx = vmm_host_irqdomain_to_hwirq(port->domain, d->num);
u32 gpio = port->gc.base + gpio_idx;
int edge;
void __iomem *reg = port->base;
port->both_edges &= ~(1 << gpio_idx);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
break;
case IRQ_TYPE_EDGE_FALLING:
edge = GPIO_INT_FALL_EDGE;
break;
case IRQ_TYPE_EDGE_BOTH:
if (GPIO_EDGE_SEL >= 0) {
edge = GPIO_INT_BOTH_EDGES;
} else {
val = __gpio_get_value(gpio);
if (val) {
edge = GPIO_INT_LOW_LEV;
pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
} else {
edge = GPIO_INT_HIGH_LEV;
pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
}
port->both_edges |= 1 << gpio_idx;
}
break;
case IRQ_TYPE_LEVEL_LOW:
edge = GPIO_INT_LOW_LEV;
break;
case IRQ_TYPE_LEVEL_HIGH:
edge = GPIO_INT_HIGH_LEV;
break;
default:
return -EINVAL;
}
if (GPIO_EDGE_SEL >= 0) {
val = readl(port->base + GPIO_EDGE_SEL);
if (edge == GPIO_INT_BOTH_EDGES)
writel(val | (1 << gpio_idx),
port->base + GPIO_EDGE_SEL);
else
writel(val & ~(1 << gpio_idx),
port->base + GPIO_EDGE_SEL);
}
if (edge != GPIO_INT_BOTH_EDGES) {
reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
bit = gpio_idx & 0xf;
val = readl(reg) & ~(0x3 << (bit << 1));
writel(val | (edge << (bit << 1)), reg);
}
writel(1 << gpio_idx, port->base + GPIO_ISR);
return 0;
}
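/* Emulate both-edge triggering by inverting the level sensitivity of the
 * pin, so that the next transition raises an interrupt again. */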
void __noinline mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
u32 bit, val;
int edge;
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
val = readl(reg);
edge = (val >> (bit << 1)) & 3;
val &= ~(0x3 << (bit << 1));
if (edge == GPIO_INT_HIGH_LEV) {
edge = GPIO_INT_LOW_LEV;
pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
} else if (edge == GPIO_INT_LOW_LEV) {
edge = GPIO_INT_HIGH_LEV;
pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
return;
}
writel(val | (edge << (bit << 1)), reg);
}
/* handle 32 interrupts in one status register */
static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
{
u32 irq_num = 0;
u32 cpu = vmm_smp_processor_id();
struct vmm_host_irq *irq;
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
if (port->both_edges & (1 << irqoffset))
mxc_flip_edge(port, irqoffset);
irq_num = vmm_host_irqdomain_find_mapping(port->domain,
irqoffset);
irq = vmm_host_irq_get(irq_num);
vmm_handle_level_irq(irq, cpu, port);
irq_stat &= ~(1 << irqoffset);
}
}
/* MX1 and MX3 has one interrupt *per* gpio port */
static vmm_irq_return_t mx3_gpio_irq_handler(int irq, void *data)
{
u32 irq_stat;
struct vmm_host_irq* desc = NULL;
struct mxc_gpio_port *port = data;
struct vmm_host_irq_chip *chip = NULL;
desc = vmm_host_irq_get(irq);
chip = vmm_host_irq_get_chip(desc);
vmm_chained_irq_enter(chip, desc);
irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR);
mxc_gpio_irq_handler(port, irq_stat);
vmm_chained_irq_exit(chip, desc);
return VMM_IRQ_HANDLED;
}
/* MX2 has one interrupt *for all* gpio ports */
static vmm_irq_return_t mx2_gpio_irq_handler(int irq, void *data)
{
u32 irq_msk, irq_stat;
struct vmm_host_irq* desc = NULL;
struct mxc_gpio_port *port = NULL;
struct vmm_host_irq_chip *chip = NULL;
desc = vmm_host_irq_get(irq);
chip = vmm_host_irq_get_chip(desc);
port = vmm_host_irq_get_chip_data(desc);
vmm_chained_irq_enter(chip, desc);
/* walk through all interrupt status registers */
list_for_each_entry(port, &mxc_gpio_ports, node) {
irq_msk = readl(port->base + GPIO_IMR);
if (!irq_msk)
continue;
irq_stat = readl(port->base + GPIO_ISR) & irq_msk;
if (irq_stat)
mxc_gpio_irq_handler(port, irq_stat);
}
vmm_chained_irq_exit(chip, desc);
return VMM_IRQ_HANDLED;
}
/* FIXME: Temporary */
static void irq_gc_lock(struct vmm_host_irq_chip *gc)
{
gc = gc;
}
/* FIXME: Temporary */
static void irq_gc_unlock(struct vmm_host_irq_chip *gc)
{
gc = gc;
}
static void irq_gc_init_lock(struct vmm_host_irq_chip *gc)
{
gc = gc;
}
/**
* irq_gc_ack_set_bit - Ack pending interrupt via setting bit
* @d: irq_data
*/
void irq_gc_ack_set_bit(struct vmm_host_irq *d)
{
struct vmm_host_irq_chip *gc = vmm_host_irq_get_chip(d);
struct mxc_gpio_port *port = vmm_host_irq_get_chip_data(d);
int irqoffset = vmm_host_irqdomain_to_hwirq(port->domain, d->num);
irq_gc_lock(gc);
writel(1 << irqoffset, port->base + GPIO_ISR);
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
* @d: irq_data
*
* The port has a single interrupt mask register (IMR); clearing the bit
* disables (masks) the interrupt.
*/
void irq_gc_mask_clr_bit(struct vmm_host_irq *d)
{
struct vmm_host_irq_chip *gc = vmm_host_irq_get_chip(d);
struct mxc_gpio_port *port = vmm_host_irq_get_chip_data(d);
int irqoffset = vmm_host_irqdomain_to_hwirq(port->domain, d->num);
u32 mask = 0;
irq_gc_lock(gc);
mask = readl(port->base + GPIO_IMR) & ~(1 << irqoffset);
writel(mask, port->base + GPIO_IMR);
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_set_bit - Unmask chip via setting bit in mask register
* @d: irq_data
*
* The port has a single interrupt mask register (IMR); setting the bit
* enables (unmasks) the interrupt, so this is used as the irq_unmask callback.
*/
void irq_gc_mask_set_bit(struct vmm_host_irq *d)
{
struct vmm_host_irq_chip *gc = vmm_host_irq_get_chip(d);
struct mxc_gpio_port *port = vmm_host_irq_get_chip_data(d);
int irqoffset = vmm_host_irqdomain_to_hwirq(port->domain, d->num);
u32 mask = 0;
irq_gc_lock(gc);
mask = readl(port->base + GPIO_IMR) | (1 << irqoffset);
writel(mask, port->base + GPIO_IMR);
irq_gc_unlock(gc);
}
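/* Create the irq chip and irq domain for one GPIO port and map each of its
 * lines to a host interrupt. */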
static int __init mxc_gpio_init_gc(struct mxc_gpio_port *port,
const char *name, int sz,
struct vmm_device *dev)
{
struct vmm_host_irq_chip *gc;
int irq = 0;
int i = 0;
if (NULL == (gc = vmm_zalloc(sizeof(struct vmm_host_irq_chip))))
{
pr_err("mxc: Failed to allocate IRQ chip\n");
return -ENOMEM;
}
irq_gc_init_lock(gc);
gc->irq_ack = irq_gc_ack_set_bit;
gc->irq_mask = irq_gc_mask_clr_bit;
gc->irq_unmask = irq_gc_mask_set_bit;
gc->irq_set_type = gpio_set_irq_type;
gc->name = name;
port->domain = vmm_host_irqdomain_add(dev->of_node, -1, sz,
&irqdomain_simple_ops, port);
if (!port->domain)
return VMM_ENOTAVAIL;
for (i = 0; i < sz; ++i) {
irq = vmm_host_irqdomain_create_mapping(port->domain, i);
if (irq < 0) {
pr_err("mxc: Failed to map extended IRQs\n");
vmm_free(gc);
return -ENODEV;
}
vmm_host_irq_set_chip(irq, gc);
vmm_host_irq_set_chip_data(irq, port);
}
return VMM_OK;
}
static void mxc_gpio_get_hw(const struct vmm_devtree_nodeid *dev)
{
#if 0
const struct vmm_devtree_nodeid *nodeid =
of_match_device(mxc_gpio_dt_ids, &pdev->dev);
#endif
const struct platform_device_id *pdev = dev->data;
enum mxc_gpio_hwtype hwtype;
hwtype = pdev->driver_data;
if (mxc_gpio_hwtype) {
/*
* The driver assumes that all GPIO ports on a given SoC
* are of the same hardware type.
*/
BUG_ON(mxc_gpio_hwtype != hwtype);
return;
}
if (hwtype == IMX35_GPIO)
mxc_gpio_hwdata = &imx35_gpio_hwdata;
else if (hwtype == IMX31_GPIO)
mxc_gpio_hwdata = &imx31_gpio_hwdata;
else
mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
mxc_gpio_hwtype = hwtype;
}
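/* gpio_chip .to_irq callback: translate a GPIO offset into the host
 * interrupt number mapped in the port's irq domain. */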
static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct mxc_gpio_port *port =
container_of(gc, struct mxc_gpio_port, gc);
return vmm_host_irqdomain_find_mapping(port->domain, offset);
}
#define PORT_NAME_LEN 12
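/* Probe one GPIO port: map its registers, disable and clear any pending
 * interrupts, install the chained interrupt handler(s), then register the
 * generic gpio_chip and its irq domain. */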
static int mxc_gpio_probe(struct vmm_device *dev)
{
struct device_node *np = dev->of_node;
struct mxc_gpio_port *port;
int err = VMM_OK;
int port_num = 0;
char *name = NULL;
char *irq_name = NULL;
const struct vmm_devtree_nodeid *devid;
devid = vmm_platform_match_nodeid(dev);
if (!devid)
return -ENODEV;
mxc_gpio_get_hw(devid);
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
err = vmm_devtree_request_regmap(np, (virtual_addr_t *)&port->base, 0,
"MXC GPIO");
if (VMM_OK != err) {
dev_err(dev, "failed to map registers from the device tree\n");
goto out_regmap;
}
port->irq_high = vmm_devtree_irq_parse_map(np, 1);
port->irq = vmm_devtree_irq_parse_map(np, 0);
if (!port->irq) {
err = VMM_ENODEV;
goto out_irq_get;
}
name = vmm_malloc(PORT_NAME_LEN);
port_num = vmm_devtree_alias_get_id(dev->of_node, "gpio");
snprintf(name, PORT_NAME_LEN, "gpio_mxc%d", port_num);
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
if (mxc_gpio_hwtype == IMX21_GPIO) {
irq_name = vmm_malloc(PORT_NAME_LEN);
strncpy(irq_name, name, PORT_NAME_LEN);
/*
* Setup one handler for all GPIO interrupts. Actually setting
* the handler is needed only once, but doing it for every port
* is more robust and easier.
*/
err = vmm_host_irq_register(port->irq, irq_name,
mx2_gpio_irq_handler, port);
if (VMM_OK != err)
goto out_irq_reg;
} else {
/* setup one handler for each entry */
irq_name = vmm_malloc(PORT_NAME_LEN + 5);
if (port->irq_high > 0) {
/* only pins 0-15 on the first interrupt */
snprintf(irq_name, PORT_NAME_LEN + 5,
"gpio_mxc%d 0-15", port_num);
} else {
/* pins 0-31 on a single interrupt */
snprintf(irq_name, PORT_NAME_LEN + 5,
"gpio_mxc%d 0-31", port_num);
}
err = vmm_host_irq_register(port->irq, irq_name,
mx3_gpio_irq_handler, port);
if (VMM_OK != err)
goto out_irq_reg;
if (port->irq_high > 0) {
/* setup handler for GPIO 16 to 31 */
irq_name = vmm_malloc(PORT_NAME_LEN + 6);
snprintf(irq_name, PORT_NAME_LEN + 6,
"gpio_mxc%d 16-31", port_num);
err = vmm_host_irq_register(port->irq_high, irq_name,
mx3_gpio_irq_handler,
port);
if (VMM_OK != err)
goto out_irq_reg_high;
}
}
err = bgpio_init(&port->gc, dev, 4,
port->base + GPIO_PSR,
port->base + GPIO_DR, NULL,
port->base + GPIO_GDIR, NULL, 0);
if (err)
goto out_bgpio;
port->gc.to_irq = mxc_gpio_to_irq;
port->gc.base = (port_num - 1) * 32;
err = gpiochip_add(&port->gc);
if (err)
goto out_bgpio;
/* gpio-mxc can be a generic irq chip */
err = mxc_gpio_init_gc(port, name, 32, dev);
if (err)
goto out_gpiochip_remove;
list_add_tail(&port->node, &mxc_gpio_ports);
dev_info(dev, "%s registered\n", name);
return err;
#if 0
out_irqdesc_free:
irq_free_descs(irq_base, 32);
#endif
out_gpiochip_remove:
gpiochip_remove(&port->gc);
out_bgpio:
if (port->irq_high > 0)
vmm_host_irq_unregister(port->irq_high, dev);
out_irq_reg_high:
vmm_host_irq_unregister(port->irq, dev);
out_irq_reg:
out_irq_get:
vmm_devtree_regunmap_release(np, (virtual_addr_t)port->base, 0);
out_regmap:
devm_kfree(dev, port);
dev_info(dev, "%s failed with errno %d\n", __func__, err);
return err;
}
static struct vmm_driver mxc_gpio_driver = {
.name = "gpio-mxc",
.match_table = mxc_gpio_dt_ids,
.probe = mxc_gpio_probe,
};
static int __init gpio_mxc_init(void)
{
return vmm_devdrv_register_driver(&mxc_gpio_driver);
}
#if 0
postcore_initcall(gpio_mxc_init);
#endif
VMM_DECLARE_MODULE("i.MX GPIO driver",
"Jimmy Durand Wesolowski",
"GPL",
1,
gpio_mxc_init,
NULL);
#if 0
MODULE_AUTHOR("Freescale Semiconductor, "
"Daniel Mack <[email protected]>, "
"Juergen Beisert <[email protected]>");
MODULE_DESCRIPTION("Freescale MXC GPIO");
MODULE_LICENSE("GPL");
#endif
| gpl-2.0 |
tanya-guza/abiword | plugins/openxml/imp/xp/OXMLi_ListenerState_Theme.h | 1549 | /* -*- mode: C++; tab-width: 4; c-basic-offset: 4; -*- */
/* AbiSource
*
* Copyright (C) 2007 Philippe Milot <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#ifndef _OXMLI_LISTENERSTATE_THEME_H_
#define _OXMLI_LISTENERSTATE_THEME_H_
// Internal includes
#include <OXMLi_ListenerState.h>
#include <OXMLi_Types.h>
#include <OXML_Types.h>
#include "OXML_Theme.h"
/* \class OXMLi_ListenerState_Theme
* \brief This ListenerState parses the Theme part.
*/
class OXMLi_ListenerState_Theme : public OXMLi_ListenerState
{
public:
void startElement (OXMLi_StartElementRequest * rqst);
void endElement (OXMLi_EndElementRequest * rqst);
void charData (OXMLi_CharDataRequest * rqst);
private:
OXML_SharedTheme m_theme;
UT_Error _initializeTheme();
std::string _getHexFromPreset(std::string preset);
};
#endif //_OXMLI_LISTENERSTATE_THEME_H_
| gpl-2.0 |
joelbrock/ELFCO_CORE | documentation/doxy/output/fannie/html/class_p_h_p_excel___writer___excel2007___content_types.html | 5703 | <!-- This comment will put IE 6, 7 and 8 in quirks mode -->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<title>CORE POS - Fannie: PHPExcel_Writer_Excel2007_ContentTypes Class Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javaScript" src="search/search.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<!-- Generated by Doxygen 1.6.3 -->
<script type="text/javascript"><!--
var searchBox = new SearchBox("searchBox", "search",false,'Search');
--></script>
<div class="navigation" id="top">
<div class="tabs">
<ul>
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="pages.html"><span>Related Pages</span></a></li>
<li><a href="namespaces.html"><span>Namespaces</span></a></li>
<li class="current"><a href="annotated.html"><span>Classes</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<form id="FSearchBox" action="search.php" method="get">
<img id="MSearchSelect" src="search/search.png" alt=""/>
<input type="text" id="MSearchField" name="query" value="Search" size="20" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"/>
</form>
</div>
</li>
</ul>
</div>
<div class="tabs">
<ul>
<li><a href="annotated.html"><span>Class List</span></a></li>
<li><a href="classes.html"><span>Class Index</span></a></li>
<li><a href="hierarchy.html"><span>Class Hierarchy</span></a></li>
<li><a href="functions.html"><span>Class Members</span></a></li>
</ul>
</div>
</div>
<div class="contents">
<h1>PHPExcel_Writer_Excel2007_ContentTypes Class Reference</h1><!-- doxytag: class="PHPExcel_Writer_Excel2007_ContentTypes" --><!-- doxytag: inherits="PHPExcel_Writer_Excel2007_WriterPart" --><div class="dynheader">
Inheritance diagram for PHPExcel_Writer_Excel2007_ContentTypes:</div>
<div class="dynsection">
<div class="center">
<img src="class_p_h_p_excel___writer___excel2007___content_types.png" usemap="#PHPExcel_Writer_Excel2007_ContentTypes_map" alt=""/>
<map id="PHPExcel_Writer_Excel2007_ContentTypes_map" name="PHPExcel_Writer_Excel2007_ContentTypes_map">
<area href="class_p_h_p_excel___writer___excel2007___writer_part.html" alt="PHPExcel_Writer_Excel2007_WriterPart" shape="rect" coords="0,0,259,24"/>
</map>
</div>
</div>
<p><a href="class_p_h_p_excel___writer___excel2007___content_types-members.html">List of all members.</a></p>
<table border="0" cellpadding="0" cellspacing="0">
<tr><td colspan="2"><h2>Public Member Functions</h2></td></tr>
<tr><td class="memItemLeft" align="right" valign="top"> </td><td class="memItemRight" valign="bottom"><a class="el" href="class_p_h_p_excel___writer___excel2007___content_types.html#a73d06703fb5324cd0e1a8ddcb42e507a">writeContentTypes</a> (<a class="el" href="class_p_h_p_excel.html">PHPExcel</a> $pPHPExcel=null, $includeCharts=FALSE)</td></tr>
</table>
<hr/><h2>Member Function Documentation</h2>
<a class="anchor" id="a73d06703fb5324cd0e1a8ddcb42e507a"></a><!-- doxytag: member="PHPExcel_Writer_Excel2007_ContentTypes::writeContentTypes" ref="a73d06703fb5324cd0e1a8ddcb42e507a" args="(PHPExcel $pPHPExcel=null, $includeCharts=FALSE)" -->
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">PHPExcel_Writer_Excel2007_ContentTypes::writeContentTypes </td>
<td>(</td>
<td class="paramtype"><a class="el" href="class_p_h_p_excel.html">PHPExcel</a> $ </td>
<td class="paramname"> <em>pPHPExcel</em> = <code>null</code>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">$ </td>
<td class="paramname"> <em>includeCharts</em> = <code>FALSE</code></td><td> </td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td></td><td></td>
</tr>
</table>
</div>
<div class="memdoc">
<p>Write content types to XML format</p>
<dl><dt><b>Parameters:</b></dt><dd>
<table border="0" cellspacing="2" cellpadding="0">
<tr><td valign="top"></td><td valign="top"><em><a class="el" href="class_p_h_p_excel.html">PHPExcel</a></em> </td><td>$pPHPExcel </td></tr>
<tr><td valign="top"></td><td valign="top"><em>boolean</em> </td><td>$includeCharts Flag indicating if we should include drawing details for charts </td></tr>
</table>
</dd>
</dl>
<dl class="return"><dt><b>Returns:</b></dt><dd>string XML Output </dd></dl>
<dl><dt><b>Exceptions:</b></dt><dd>
<table border="0" cellspacing="2" cellpadding="0">
<tr><td valign="top"></td><td valign="top"><em>Exception</em> </td><td></td></tr>
</table>
</dd>
</dl>
</div>
</div>
<hr/>The documentation for this class was generated from the following file:<ul>
<li>fannie/src/PHPExcel/Classes/PHPExcel/Writer/Excel2007/ContentTypes.php</li>
</ul>
</div>
<hr class="footer"/><address style="text-align: right;"><small>Generated on Fri Aug 9 21:30:39 2013 for CORE POS - Fannie by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.6.3 </small></address>
</body>
</html>
| gpl-2.0 |
tonvinh/ez | vendor/ezsystems/ezpublish-kernel/eZ/Bundle/EzPublishCoreBundle/Fragment/FragmentListenerFactory.php | 1381 | <?php
/**
* File containing the FragmentListenerFactory class.
*
* @copyright Copyright (C) eZ Systems AS. All rights reserved.
* @license For full copyright and license information view LICENSE file distributed with this source code.
* @version 2014.07.0
*/
namespace eZ\Bundle\EzPublishCoreBundle\Fragment;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpKernel\UriSigner;
/**
* Custom factory for Symfony FragmentListener.
* Makes fragment paths SiteAccess aware (when in URI).
*/
class FragmentListenerFactory
{
/**
* @var \Symfony\Component\HttpFoundation\Request
*/
private $request;
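/**
 * Sets the current request whose path info is used to build a
 * SiteAccess aware fragment path.
 *
 * @param \Symfony\Component\HttpFoundation\Request|null $request
 */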
public function setRequest( Request $request = null )
{
$this->request = $request;
}
public function buildFragmentListener( UriSigner $uriSigner, $fragmentPath, $fragmentListenerClass )
{
// Ensure that current pathinfo ends with configured fragment path.
// If so, consider it as the fragment path.
// This keeps fragment paths compatible with URI based SiteAccesses.
$pathInfo = $this->request->getPathInfo();
if ( substr( $pathInfo, -strlen( $fragmentPath ) ) === $fragmentPath )
{
$fragmentPath = $pathInfo;
}
$fragmentListener = new $fragmentListenerClass( $uriSigner, $fragmentPath );
return $fragmentListener;
}
}
| gpl-2.0 |
Astroua/CARTAvis | carta/cpp/core/Data/Image/DataFactory.cpp | 4125 | #include "DataFactory.h"
#include "Data/DataLoader.h"
#include "Data/Image/Controller.h"
#include "Data/Image/DataSource.h"
#include "Data/Region/Region.h"
#include "Data/Region/RegionFactory.h"
#include "Data/Util.h"
#include "CartaLib/Hooks/LoadRegion.h"
#include "Globals.h"
#include <QDebug>
namespace Carta {
namespace Data {
DataFactory::DataFactory(){
}
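//Add the file to the controller, first trying to load it as a region file
//when it looks like one, otherwise loading it as an image; the returned
//string carries an error message when the load fails.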
QString DataFactory::addData( Controller* controller, const QString& fileName, bool* success ){
QString result;
*success = false;
if ( controller ){
QFileInfo fileInfo( fileName );
bool dataFile = fileInfo.isFile();
if ( dataFile ){
bool regionFile = _isRegion( fileName );
//If we think it is a region, see if any of the region parsing
//plugins can handle it.
if ( regionFile ){
std::vector<std::shared_ptr<Region> > regions =
_loadRegions( controller, fileName, success, result );
if ( regions.size() > 0 ){
controller->_addDataRegions( regions );
}
}
}
//Try loading it as an image.
if ( !(*success) ){
result = controller->_addDataImage( fileName, success );
}
}
else {
result = "The data in "+fileName +" could not be added because no controller was specified.";
}
return result;
}
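//Heuristically decide whether the file holds region definitions, based on
//its extension or on the signature found in its first line.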
bool DataFactory::_isRegion( const QString& fileName ){
bool regionFile = false;
if ( fileName.endsWith( DataLoader::CRTF) ){
regionFile = true;
}
else if ( fileName.endsWith( ".reg")){
regionFile = true;
}
else {
QFile file( fileName );
if ( file.open( QIODevice::ReadOnly | QIODevice::Text)){
char buf[1024];
qint64 lineLength = file.readLine( buf, sizeof(buf));
if ( lineLength > 0 ){
QString line( buf );
if ( line.startsWith( "#CRTF") ){
regionFile = true;
}
else if ( line.startsWith( "# Region file format: DS9") ){
regionFile = true;
}
//Region files for unspecified plug-ins?
else if ( line.contains( "region", Qt::CaseInsensitive) ){
regionFile = true;
}
}
}
}
return regionFile;
}
std::vector<std::shared_ptr<Region> > DataFactory::_loadRegions( Controller* controller,
const QString& fileName, bool* success, QString& errorMsg ){
std::vector< std::shared_ptr<Region> > regions;
std::shared_ptr<DataSource> dataSource = controller->getDataSource();
if ( dataSource ){
std::shared_ptr<Carta::Lib::Image::ImageInterface> image = dataSource->_getImage();
auto result = Globals::instance()-> pluginManager()
-> prepare <Carta::Lib::Hooks::LoadRegion>(fileName, image );
auto lam = /*[=]*/[&regions,fileName] ( const Carta::Lib::Hooks::LoadRegion::ResultType &data ) {
int regionCount = data.size();
//Return whether to continue the loop or not. We continue until we
//find the first plugin that can handle the region format and generate
//one or more regions.
bool continueLoop = true;
if ( regionCount > 0 ){
continueLoop = false;
}
for ( int i = 0; i < regionCount; i++ ){
if ( data[i] ){
std::shared_ptr<Region> regionPtr = RegionFactory::makeRegion( data[i] );
regionPtr -> _setUserId( fileName, i );
regions.push_back( regionPtr );
}
}
return continueLoop;
};
try {
//Find the first plugin that can load the region.
result.forEachCond( lam );
*success = true;
}
catch( char*& error ){
errorMsg = QString( error );
*success = false;
}
}
return regions;
}
DataFactory::~DataFactory(){
}
}
}
| gpl-2.0 |
itsimbal/gcc.cet | gcc/tree-affine.h | 3892 | /* Operations with affine combinations of trees.
Copyright (C) 2005-2017 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Affine combination of trees. We keep track of at most MAX_AFF_ELTS elements
to make things simpler; this is sufficient in most cases. */
#ifndef GCC_TREE_AFFINE_H
#define GCC_TREE_AFFINE_H
#define MAX_AFF_ELTS 8
/* Element of an affine combination. */
struct aff_comb_elt
{
/* The value of the element. */
tree val;
/* Its coefficient in the combination. */
widest_int coef;
};
struct aff_tree
{
/* Type of the result of the combination. */
tree type;
/* Constant offset. */
widest_int offset;
/* Number of elements of the combination. */
unsigned n;
/* Elements and their coefficients. Type of elements may be different from
TYPE, but their sizes must be the same (STRIP_NOPS is applied to the
elements).
The coefficients are always sign extended from the precision of TYPE
(regardless of signedness of TYPE). */
struct aff_comb_elt elts[MAX_AFF_ELTS];
/* Remainder of the expression. Usually NULL, used only if there are more
than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
TYPE of POINTER_TYPEs or TYPE. */
tree rest;
};
struct name_expansion;
widest_int wide_int_ext_for_comb (const widest_int &, aff_tree *);
void aff_combination_const (aff_tree *, tree, const widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
void aff_combination_scale (aff_tree *, const widest_int &);
void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *);
void aff_combination_add (aff_tree *, aff_tree *);
void aff_combination_add_elt (aff_tree *, tree, const widest_int &);
void aff_combination_remove_elt (aff_tree *, unsigned);
void aff_combination_convert (aff_tree *, tree);
void tree_to_aff_combination (tree, tree, aff_tree *);
tree aff_combination_to_tree (aff_tree *);
void unshare_aff_combination (aff_tree *);
bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, widest_int *);
void aff_combination_expand (aff_tree *, hash_map<tree, name_expansion *> **);
void tree_to_aff_combination_expand (tree, tree, aff_tree *,
hash_map<tree, name_expansion *> **);
tree get_inner_reference_aff (tree, aff_tree *, widest_int *);
void free_affine_expand_cache (hash_map<tree, name_expansion *> **);
bool aff_comb_cannot_overlap_p (aff_tree *, const widest_int &,
const widest_int &);
/* Debugging functions. */
void debug_aff (aff_tree *);
/* Return AFF's type. */
inline tree
aff_combination_type (aff_tree *aff)
{
return aff->type;
}
/* Return true if AFF is actually ZERO. */
inline bool
aff_combination_zero_p (aff_tree *aff)
{
if (!aff)
return true;
if (aff->n == 0 && aff->offset == 0)
return true;
return false;
}
/* Return true if AFF is actually const. */
inline bool
aff_combination_const_p (aff_tree *aff)
{
return (aff == NULL || aff->n == 0);
}
/* Return true iff AFF contains one (negated) singleton variable. Users need
to make sure AFF points to a valid combination. */
inline bool
aff_combination_singleton_var_p (aff_tree *aff)
{
return (aff->n == 1
&& aff->offset == 0
&& (aff->elts[0].coef == 1 || aff->elts[0].coef == -1));
}
#endif /* GCC_TREE_AFFINE_H */
| gpl-2.0 |
rupenp/CoreNLP | src/edu/stanford/nlp/coref/hybrid/sieve/OracleSieve.java | 1774 | package edu.stanford.nlp.coref.hybrid.sieve;
import edu.stanford.nlp.util.logging.Redwood;
import java.util.List;
import java.util.Properties;
import edu.stanford.nlp.coref.data.Dictionaries;
import edu.stanford.nlp.coref.data.Document;
import edu.stanford.nlp.coref.data.Mention;
import edu.stanford.nlp.coref.data.Dictionaries.MentionType;
public class OracleSieve extends Sieve {
/** A logger for this class */
private static Redwood.RedwoodChannels log = Redwood.channels(OracleSieve.class);
private static final long serialVersionUID = 3510248899162246138L;
public OracleSieve(Properties props, String sievename) {
super(props, sievename);
this.classifierType = ClassifierType.ORACLE;
}
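/**
 * Scans mentions in the current and earlier sentences for the closest
 * candidate that the gold annotation marks as coreferent with m, and
 * merges the two clusters when such a candidate is found.
 */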
@Override
public void findCoreferentAntecedent(Mention m, int mIdx, Document document, Dictionaries dict, Properties props, StringBuilder sbLog) throws Exception {
for(int distance=0 ; distance <= m.sentNum ; distance++) {
List<Mention> candidates = document.predictedMentions.get(m.sentNum-distance);
for(Mention candidate : candidates) {
if(!matchedMentionType(candidate, aTypeStr) || !matchedMentionType(m, mTypeStr)) continue;
// if(!options.mType.contains(m.mentionType) || !options.aType.contains(candidate.mentionType)) continue;
if(candidate == m) continue;
if(distance==0 && m.appearEarlierThan(candidate)) continue; // ignore cataphora
if(Sieve.isReallyCoref(document, m.mentionID, candidate.mentionID)) {
if(m.mentionType==MentionType.LIST) {
log.info("LIST MATCHING MENTION : "+m.spanToString()+"\tANT: "+candidate.spanToString());
}
Sieve.merge(document, m.mentionID, candidate.mentionID);
return;
}
}
}
}
}
| gpl-2.0 |
JMAConsulting/campaigntool | sites/all/modules/civicrm/CRM/Contribute/Form/Contribution.php | 68192 | <?php
/*
+--------------------------------------------------------------------+
| CiviCRM version 4.6 |
+--------------------------------------------------------------------+
| Copyright CiviCRM LLC (c) 2004-2015 |
+--------------------------------------------------------------------+
| This file is a part of CiviCRM. |
| |
| CiviCRM is free software; you can copy, modify, and distribute it |
| under the terms of the GNU Affero General Public License |
| Version 3, 19 November 2007 and the CiviCRM Licensing Exception. |
| |
| CiviCRM is distributed in the hope that it will be useful, but |
| WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
| See the GNU Affero General Public License for more details. |
| |
| You should have received a copy of the GNU Affero General Public |
| License and the CiviCRM Licensing Exception along |
| with this program; if not, contact CiviCRM LLC |
| at info[AT]civicrm[DOT]org. If you have questions about the |
| GNU Affero General Public License or the licensing of CiviCRM, |
| see the CiviCRM license FAQ at http://civicrm.org/licensing |
+--------------------------------------------------------------------+
*/
/**
* This class generates form components for processing a contribution.
*/
class CRM_Contribute_Form_Contribution extends CRM_Contribute_Form_AbstractEditPayment {
/**
* The id of the contribution that we are processing.
*
* @var int
*/
public $_id;
/**
* The id of the premium that we are processing.
*
* @var int
*/
public $_premiumID = NULL;
/**
* @var CRM_Contribute_DAO_ContributionProduct
*/
public $_productDAO = NULL;
/**
* The id of the note.
*
* @var int
*/
public $_noteID;
/**
* The id of the contact associated with this contribution.
*
* @var int
*/
public $_contactID;
/**
* The id of the pledge payment that we are processing.
*
* @var int
*/
public $_ppID;
/**
* The id of the pledge that we are processing.
*
* @var int
*/
public $_pledgeID;
/**
* Is this contribution associated with an online
* financial transaction?
*
* @var boolean
*/
public $_online = FALSE;
/**
* Stores all product options.
*
* @var array
*/
public $_options;
/**
* Storage of parameters from form
*
* @var array
*/
public $_params;
/**
* Store the contribution Type ID
*
* @var array
*/
public $_contributionType;
/**
* The contribution values if an existing contribution
*/
public $_values;
/**
* The pledge values if this contribution is associated with pledge
*/
public $_pledgeValues;
public $_contributeMode = 'direct';
public $_context;
/**
* Parameter with confusing name.
* @todo what is it?
* @var string
*/
public $_compContext;
public $_compId;
/**
* Possible From email addresses
* @var array
*/
public $_fromEmails;
/**
* ID of from email
* @var integer
*/
public $fromEmailId;
/**
* Store the line items if price set used.
*/
public $_lineItems;
/**
* Line item
* @todo explain why we use lineItem & lineItems
* @var array
*/
public $_lineItem;
/**
* @var array soft credit info
*/
public $_softCreditInfo;
protected $_formType;
/**
* @todo what on earth does cdType stand for????
* @var
*/
protected $_cdType;
public $_honoreeProfileType;
/**
* Array of billing panes to be displayed by billingBlock.tpl.
* Currently this is likely to look like
* array('Credit Card' => ts('Credit Card') or
* array('Direct Debit => ts('Direct Debit')
* @todo billing details (address stuff) to be added when we stop hard coding the panes in billingBlock.tpl
*
* @var array
*/
public $billingPane = array();
/**
* Array of the payment fields to be displayed in the payment fieldset (pane) in billingBlock.tpl
* this contains all the information to describe these fields from quickform. See CRM_Core_Form_Payment getPaymentFormFieldsMetadata
*
* @var array
*/
public $_paymentFields = array();
/**
* Logged in user's email.
* @var string
*/
public $userEmail;
/**
* Price set ID
* @var integer
*/
public $_priceSetId;
/**
* Price set as an array
* @var array
*/
public $_priceSet;
/**
* Form defaults
* @todo can we define this a as protected? can we define higher up the chain
* @var array
*/
public $_defaults;
/**
* User display name
*
* @var string
*/
public $userDisplayName;
/**
* Set variables up before form is built.
*/
public function preProcess() {
// Check permission for action.
if (!CRM_Core_Permission::checkActionPermission('CiviContribute', $this->_action)) {
CRM_Core_Error::fatal(ts('You do not have permission to access this page.'));
}
// @todo - if anyone ever figures out what this _cdType subroutine is about
// (or even if it still applies) please add comments!!!!!!!!!!
$this->_cdType = CRM_Utils_Array::value('type', $_GET);
$this->assign('cdType', FALSE);
if ($this->_cdType) {
$this->assign('cdType', TRUE);
CRM_Custom_Form_CustomData::preProcess($this);
return;
}
$this->_formType = CRM_Utils_Array::value('formType', $_GET);
// Get price set id.
$this->_priceSetId = CRM_Utils_Array::value('priceSetId', $_GET);
$this->set('priceSetId', $this->_priceSetId);
$this->assign('priceSetId', $this->_priceSetId);
// Get the pledge payment id
$this->_ppID = CRM_Utils_Request::retrieve('ppid', 'Positive', $this);
// Get the contact id
$this->_contactID = CRM_Utils_Request::retrieve('cid', 'Positive', $this);
// Get the action.
$this->_action = CRM_Utils_Request::retrieve('action', 'String', $this, FALSE, 'add');
$this->assign('action', $this->_action);
// Get the contribution id if update
$this->_id = CRM_Utils_Request::retrieve('id', 'Positive', $this);
if (!empty($this->_id)) {
$this->assign('contribID', $this->_id);
}
$this->_context = CRM_Utils_Request::retrieve('context', 'String', $this);
$this->assign('context', $this->_context);
$this->_compId = CRM_Utils_Request::retrieve('compId', 'Positive', $this);
$this->_compContext = CRM_Utils_Request::retrieve('compContext', 'String', $this);
//set the contribution mode.
$this->_mode = CRM_Utils_Request::retrieve('mode', 'String', $this);
$this->assign('contributionMode', $this->_mode);
if ($this->_action & CRM_Core_Action::DELETE) {
return;
}
$this->assign('showCheckNumber', TRUE);
$this->_fromEmails = CRM_Core_BAO_Email::getFromEmail();
$this->assignPaymentRelatedVariables();
if (in_array('CiviPledge', CRM_Core_Config::singleton()->enableComponents) && !$this->_formType) {
$this->preProcessPledge();
}
if ($this->_id) {
$this->showRecordLinkMesssage($this->_id);
}
$this->_values = array();
// Current contribution id.
if ($this->_id) {
$this->assignPremiumProduct($this->_id);
$this->buildValuesAndAssignOnline_Note_Type($this->_id, $this->_values);
}
// when custom data is included in this page
if (!empty($_POST['hidden_custom'])) {
$this->applyCustomData('Contribution', CRM_Utils_Array::value('financial_type_id', $_POST), $this->_id);
}
$this->_lineItems = array();
if ($this->_id) {
if (!empty($this->_compId) && $this->_compContext == 'participant') {
$this->assign('compId', $this->_compId);
$lineItem = CRM_Price_BAO_LineItem::getLineItems($this->_compId);
}
else {
$lineItem = CRM_Price_BAO_LineItem::getLineItems($this->_id, 'contribution', 1, TRUE, TRUE);
}
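// Only keep the line items when the lookup above actually returned something;
// the ternary below is used purely for its side effect of appending to $this->_lineItems.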
empty($lineItem) ? NULL : $this->_lineItems[] = $lineItem;
}
$this->assign('lineItem', empty($this->_lineItems) ? FALSE : $this->_lineItems);
// Set title
if ($this->_mode) {
$this->setPageTitle($this->_ppID ? ts('Credit Card Pledge Payment') : ts('Credit Card Contribution'));
}
else {
$this->setPageTitle($this->_ppID ? ts('Pledge Payment') : ts('Contribution'));
}
if ($this->_id) {
CRM_Contribute_Form_SoftCredit::preprocess($this);
}
}
/**
* Set default values.
*
* @return array
*/
public function setDefaultValues() {
if ($this->_cdType) {
// @todo document when this function would be called in this way
// (and whether it is valid or an overloading of this form).
return CRM_Custom_Form_CustomData::setDefaultValues($this);
}
$defaults = $this->_values;
// Set defaults for pledge payment.
if ($this->_ppID) {
$defaults['total_amount'] = CRM_Utils_Array::value('scheduled_amount', $this->_pledgeValues['pledgePayment']);
$defaults['financial_type_id'] = CRM_Utils_Array::value('financial_type_id', $this->_pledgeValues);
$defaults['currency'] = CRM_Utils_Array::value('currency', $this->_pledgeValues);
$defaults['option_type'] = 1;
}
if ($this->_action & CRM_Core_Action::DELETE) {
return $defaults;
}
$defaults['frequency_interval'] = 1;
$defaults['frequency_unit'] = 'month';
// Set soft credit defaults.
CRM_Contribute_Form_SoftCredit::setDefaultValues($defaults, $this);
if ($this->_mode) {
$config = CRM_Core_Config::singleton();
// Set default country from config if no country set.
if (empty($defaults["billing_country_id-{$this->_bltID}"])) {
$defaults["billing_country_id-{$this->_bltID}"] = $config->defaultContactCountry;
}
if (empty($defaults["billing_state_province_id-{$this->_bltID}"])) {
$defaults["billing_state_province_id-{$this->_bltID}"] = $config->defaultContactStateProvince;
}
$billingDefaults = $this->getProfileDefaults('Billing', $this->_contactID);
$defaults = array_merge($defaults, $billingDefaults);
}
if ($this->_id) {
$this->_contactID = $defaults['contact_id'];
}
// Set $newCredit variable in template to control whether link to credit card mode is included.
$this->assign('newCredit', CRM_Core_Config::isEnabledBackOfficeCreditCardPayments());
// Fix the display of the monetary value, CRM-4038.
if (isset($defaults['total_amount'])) {
if (!empty($defaults['tax_amount'])) {
$componentDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($this->_id);
if (!(CRM_Utils_Array::value('membership', $componentDetails) || CRM_Utils_Array::value('participant', $componentDetails))) {
$defaults['total_amount'] = CRM_Utils_Money::format($defaults['total_amount'] - $defaults['tax_amount'], NULL, '%a');
}
}
else {
$defaults['total_amount'] = CRM_Utils_Money::format($defaults['total_amount'], NULL, '%a');
}
}
if (isset($defaults['non_deductible_amount'])) {
$defaults['non_deductible_amount'] = CRM_Utils_Money::format($defaults['non_deductible_amount'], NULL, '%a');
}
if (isset($defaults['fee_amount'])) {
$defaults['fee_amount'] = CRM_Utils_Money::format($defaults['fee_amount'], NULL, '%a');
}
if (isset($defaults['net_amount'])) {
$defaults['net_amount'] = CRM_Utils_Money::format($defaults['net_amount'], NULL, '%a');
}
if ($this->_contributionType) {
$defaults['financial_type_id'] = $this->_contributionType;
}
if (empty($defaults['payment_instrument_id'])) {
$defaults['payment_instrument_id'] = key(CRM_Core_OptionGroup::values('payment_instrument', FALSE, FALSE, FALSE, 'AND is_default = 1'));
}
if (!empty($defaults['is_test'])) {
$this->assign('is_test', TRUE);
}
$this->assign('showOption', TRUE);
// For Premium section.
if ($this->_premiumID) {
$this->assign('showOption', FALSE);
$options = isset($this->_options[$this->_productDAO->product_id]) ? $this->_options[$this->_productDAO->product_id] : "";
if (!$options) {
$this->assign('showOption', TRUE);
}
$options_key = CRM_Utils_Array::key($this->_productDAO->product_option, $options);
if ($options_key) {
$defaults['product_name'] = array($this->_productDAO->product_id, trim($options_key));
}
else {
$defaults['product_name'] = array($this->_productDAO->product_id);
}
if ($this->_productDAO->fulfilled_date) {
list($defaults['fulfilled_date']) = CRM_Utils_Date::setDateDefaults($this->_productDAO->fulfilled_date);
}
}
if (isset($this->userEmail)) {
$this->assign('email', $this->userEmail);
}
if (!empty($defaults['is_pay_later'])) {
$this->assign('is_pay_later', TRUE);
}
$this->assign('contribution_status_id', CRM_Utils_Array::value('contribution_status_id', $defaults));
$dates = array(
'receive_date',
'receipt_date',
'cancel_date',
'thankyou_date',
);
foreach ($dates as $key) {
if (!empty($defaults[$key])) {
list($defaults[$key], $defaults[$key . '_time'])
= CRM_Utils_Date::setDateDefaults(CRM_Utils_Array::value($key, $defaults), 'activityDateTime');
}
}
if (!$this->_id && empty($defaults['receive_date'])) {
list($defaults['receive_date'],
$defaults['receive_date_time']
) = CRM_Utils_Date::setDateDefaults(NULL, 'activityDateTime');
}
$this->assign('receive_date', CRM_Utils_Date::processDate(CRM_Utils_Array::value('receive_date', $defaults),
CRM_Utils_Array::value('receive_date_time', $defaults)
));
$currency = CRM_Utils_Array::value('currency', $defaults);
$this->assign('currency', $currency);
// Hack to get currency info to the js layer. CRM-11440.
CRM_Utils_Money::format(1);
$this->assign('currencySymbol', CRM_Utils_Array::value($currency, CRM_Utils_Money::$_currencySymbols));
$this->assign('totalAmount', CRM_Utils_Array::value('total_amount', $defaults));
// Inherit campaign from pledge.
if ($this->_ppID && !empty($this->_pledgeValues['campaign_id'])) {
$defaults['campaign_id'] = $this->_pledgeValues['campaign_id'];
}
$this->_defaults = $defaults;
return $defaults;
}
/**
* Build the form object.
*/
public function buildQuickForm() {
//@todo document the purpose of cdType (if still in use)
if ($this->_cdType) {
CRM_Custom_Form_CustomData::buildQuickForm($this);
return;
}
$allPanes = array();
//tax rate from financialType
$this->assign('taxRates', json_encode(CRM_Core_PseudoConstant::getTaxRates()));
$this->assign('currencies', json_encode(CRM_Core_OptionGroup::values('currencies_enabled')));
// build price set form.
$buildPriceSet = FALSE;
$invoiceSettings = CRM_Core_BAO_Setting::getItem(CRM_Core_BAO_Setting::CONTRIBUTE_PREFERENCES_NAME, 'contribution_invoice_settings');
$invoicing = CRM_Utils_Array::value('invoicing', $invoiceSettings);
$this->assign('invoicing', $invoicing);
// display tax amount on edit contribution page
if ($invoicing && $this->_action & CRM_Core_Action::UPDATE && isset($this->_values['tax_amount'])) {
$this->assign('totalTaxAmount', $this->_values['tax_amount']);
}
if (empty($this->_lineItems) &&
($this->_priceSetId || !empty($_POST['price_set_id']))
) {
$buildPriceSet = TRUE;
$getOnlyPriceSetElements = TRUE;
if (!$this->_priceSetId) {
$this->_priceSetId = $_POST['price_set_id'];
$getOnlyPriceSetElements = FALSE;
}
$this->set('priceSetId', $this->_priceSetId);
CRM_Price_BAO_PriceSet::buildPriceSet($this);
// get only price set form elements.
if ($getOnlyPriceSetElements) {
return;
}
}
// Used to build the form during the form rule.
$this->assign('buildPriceSet', $buildPriceSet);
$showAdditionalInfo = FALSE;
$defaults = $this->_values;
$additionalDetailFields = array(
'note',
'thankyou_date',
'invoice_id',
'non_deductible_amount',
'fee_amount',
'net_amount',
);
foreach ($additionalDetailFields as $key) {
if (!empty($defaults[$key])) {
$defaults['hidden_AdditionalDetail'] = 1;
break;
}
}
if ($this->_productDAO) {
if ($this->_productDAO->product_id) {
$defaults['hidden_Premium'] = 1;
}
}
if ($this->_noteID &&
isset($this->_values['note'])
) {
$defaults['hidden_AdditionalDetail'] = 1;
}
$paneNames = array(
ts('Additional Details') => 'AdditionalDetail',
);
// Add the Premium pane only if at least one active Premium (product) exists.
$dao = new CRM_Contribute_DAO_Product();
$dao->is_active = 1;
if ($dao->find(TRUE)) {
$paneNames[ts('Premium Information')] = 'Premium';
}
$billingPanes = array();
if ($this->_mode) {
if (CRM_Core_Payment_Form::buildPaymentForm($this, $this->_paymentProcessor, FALSE) == TRUE) {
$buildRecurBlock = TRUE;
foreach ($this->billingPane as $name => $label) {
if (!empty($this->billingFieldSets[$name]['fields'])) {
// @todo reduce variation so we don't have to convert 'credit_card' to 'CreditCard'
$billingPanes[$label] = $this->generatePane(CRM_Utils_String::convertStringToCamel($name), $defaults);
}
}
}
}
foreach ($paneNames as $name => $type) {
$allPanes[$name] = $this->generatePane($type, $defaults);
}
if (empty($this->_recurPaymentProcessors)) {
$buildRecurBlock = FALSE;
}
if ($buildRecurBlock) {
CRM_Contribute_Form_Contribution_Main::buildRecur($this);
$this->setDefaults(array('is_recur' => 0));
}
$this->assign('buildRecurBlock', $buildRecurBlock);
$qfKey = $this->controller->_key;
$this->assign('qfKey', $qfKey);
$this->assign('billingPanes', $billingPanes);
$this->assign('allPanes', $allPanes);
$this->addFormRule(array('CRM_Contribute_Form_Contribution', 'formRule'), $this);
if ($this->_formType) {
$this->assign('formType', $this->_formType);
return;
}
$this->applyFilter('__ALL__', 'trim');
if ($this->_action & CRM_Core_Action::DELETE) {
$this->addButtons(array(
array(
'type' => 'next',
'name' => ts('Delete'),
'spacing' => ' ',
'isDefault' => TRUE,
),
array(
'type' => 'cancel',
'name' => ts('Cancel'),
),
)
);
return;
}
//need to assign custom data type and subtype to the template
$this->assign('customDataType', 'Contribution');
$this->assign('customDataSubType', $this->_contributionType);
$this->assign('entityID', $this->_id);
if ($this->_context == 'standalone') {
$this->addEntityRef('contact_id', ts('Contact'), array(
'create' => TRUE,
'api' => array('extra' => array('email')),
), TRUE);
}
$attributes = CRM_Core_DAO::getAttribute('CRM_Contribute_DAO_Contribution');
$financialType = $this->add('select', 'financial_type_id',
ts('Financial Type'),
array('' => ts('- select -')) + CRM_Contribute_PseudoConstant::financialType(),
TRUE,
array('onChange' => "CRM.buildCustomData( 'Contribution', this.value );")
);
$paymentInstrument = FALSE;
if (!$this->_mode) {
$paymentInstrument = $this->add('select', 'payment_instrument_id',
ts('Paid By'),
array('' => ts('- select -')) + CRM_Contribute_PseudoConstant::paymentInstrument(),
TRUE, array('onChange' => "return showHideByValue('payment_instrument_id','4','checkNumber','table-row','select',false);")
);
}
$trxnId = $this->add('text', 'trxn_id', ts('Transaction ID'), array('class' => 'twelve') + $attributes['trxn_id']);
//add receipt for offline contribution
$this->addElement('checkbox', 'is_email_receipt', ts('Send Receipt?'));
$this->add('select', 'from_email_address', ts('Receipt From'), $this->_fromEmails);
$status = CRM_Contribute_PseudoConstant::contributionStatus();
// suppressing contribution statuses that are NOT relevant to pledges (CRM-5169)
$statusName = CRM_Contribute_PseudoConstant::contributionStatus(NULL, 'name');
if ($this->_ppID) {
foreach (array(
'Cancelled',
'Failed',
'In Progress',
) as $suppress) {
unset($status[CRM_Utils_Array::key($suppress, $statusName)]);
}
}
elseif ((!$this->_ppID && $this->_id) || !$this->_id) {
$suppressFlag = FALSE;
if ($this->_id) {
$componentDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($this->_id);
if (CRM_Utils_Array::value('membership', $componentDetails) || CRM_Utils_Array::value('participant', $componentDetails)) {
$suppressFlag = TRUE;
}
}
if (!$suppressFlag) {
foreach (array(
'Overdue',
'In Progress',
) as $suppress) {
unset($status[CRM_Utils_Array::key($suppress, $statusName)]);
}
}
else {
unset($status[CRM_Utils_Array::key('Overdue', $statusName)]);
}
}
if ($this->_id) {
$contributionStatus = CRM_Core_DAO::getFieldValue('CRM_Contribute_DAO_Contribution', $this->_id, 'contribution_status_id');
$name = CRM_Utils_Array::value($contributionStatus, $statusName);
switch ($name) {
case 'Completed':
case 'Cancelled':
case 'Refunded':
unset($status[CRM_Utils_Array::key('In Progress', $statusName)]);
unset($status[CRM_Utils_Array::key('Pending', $statusName)]);
unset($status[CRM_Utils_Array::key('Failed', $statusName)]);
break;
case 'Pending':
case 'In Progress':
unset($status[CRM_Utils_Array::key('Refunded', $statusName)]);
break;
case 'Failed':
foreach (array(
'Pending',
'Refunded',
'Completed',
'In Progress',
'Cancelled',
) as $suppress) {
unset($status[CRM_Utils_Array::key($suppress, $statusName)]);
}
break;
}
}
else {
unset($status[CRM_Utils_Array::key('Refunded', $statusName)]);
}
$this->add('select', 'contribution_status_id',
ts('Contribution Status'),
$status,
FALSE
);
// add various dates
$this->addDateTime('receive_date', ts('Received'), FALSE, array('formatType' => 'activityDateTime'));
if ($this->_online) {
$this->assign('hideCalender', TRUE);
}
$checkNumber = $this->add('text', 'check_number', ts('Check Number'), $attributes['check_number']);
$this->addDateTime('receipt_date', ts('Receipt Date'), FALSE, array('formatType' => 'activityDateTime'));
$this->addDateTime('cancel_date', ts('Cancelled / Refunded Date'), FALSE, array('formatType' => 'activityDateTime'));
$this->add('textarea', 'cancel_reason', ts('Cancellation / Refund Reason'), $attributes['cancel_reason']);
$recurJs = NULL;
if ($buildRecurBlock) {
$recurJs = array('onChange' => "buildRecurBlock( this.value ); return false;");
}
$element = $this->add('select',
'payment_processor_id',
ts('Payment Processor'),
$this->_processors,
NULL,
$recurJs
);
if ($this->_online) {
$element->freeze();
}
$totalAmount = NULL;
if (empty($this->_lineItems)) {
$buildPriceSet = FALSE;
$priceSets = CRM_Price_BAO_PriceSet::getAssoc(FALSE, 'CiviContribute');
if (!empty($priceSets) && !$this->_ppID) {
$buildPriceSet = TRUE;
}
// Don't allow a price set for the contribution if it is a pledge payment,
// or if it is related to a participant that already has line items. CRM-5095
if ($buildPriceSet && $this->_id) {
$componentDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($this->_id);
$pledgePaymentId = CRM_Core_DAO::getFieldValue('CRM_Pledge_DAO_PledgePayment',
$this->_id,
'id',
'contribution_id'
);
if ($pledgePaymentId) {
$buildPriceSet = FALSE;
}
if ($participantID = CRM_Utils_Array::value('participant', $componentDetails)) {
$participantLI = CRM_Price_BAO_LineItem::getLineItems($participantID);
if (!CRM_Utils_System::isNull($participantLI)) {
$buildPriceSet = FALSE;
}
}
}
$hasPriceSets = FALSE;
if ($buildPriceSet) {
$hasPriceSets = TRUE;
$element = $this->add('select', 'price_set_id', ts('Choose price set'),
array(
'' => ts('Choose price set'),
) + $priceSets,
NULL, array('onchange' => "buildAmount( this.value );")
);
if ($this->_online && !($this->_action & CRM_Core_Action::UPDATE)) {
$element->freeze();
}
}
$this->assign('hasPriceSets', $hasPriceSets);
$currencyFreeze = FALSE;
if (!($this->_action & CRM_Core_Action::UPDATE)) {
if ($this->_online || $this->_ppID) {
$attributes['total_amount'] = array_merge($attributes['total_amount'], array(
'READONLY' => TRUE,
'style' => "background-color:#EBECE4",
));
$optionTypes = array(
'1' => ts('Adjust Pledge Payment Schedule?'),
'2' => ts('Adjust Total Pledge Amount?'),
);
$this->addRadio('option_type',
NULL,
$optionTypes,
array(), '<br/>'
);
$currencyFreeze = TRUE;
}
}
$totalAmount = $this->addMoney('total_amount',
ts('Total Amount'),
($hasPriceSets) ? FALSE : TRUE,
$attributes['total_amount'],
TRUE, 'currency', NULL, $currencyFreeze
);
}
$this->add('text', 'source', ts('Source'), CRM_Utils_Array::value('source', $attributes));
// CRM-7362 --add campaigns.
CRM_Campaign_BAO_Campaign::addCampaign($this, CRM_Utils_Array::value('campaign_id', $this->_values));
CRM_Contribute_Form_SoftCredit::buildQuickForm($this);
$js = NULL;
if (!$this->_mode) {
$js = array('onclick' => "return verify( );");
}
$mailingInfo = CRM_Core_BAO_Setting::getItem(CRM_Core_BAO_Setting::MAILING_PREFERENCES_NAME,
'mailing_backend'
);
$this->assign('outBound_option', $mailingInfo['outBound_option']);
$this->addButtons(array(
array(
'type' => 'upload',
'name' => ts('Save'),
'js' => $js,
'isDefault' => TRUE,
),
array(
'type' => 'upload',
'name' => ts('Save and New'),
'js' => $js,
'subName' => 'new',
),
array(
'type' => 'cancel',
'name' => ts('Cancel'),
),
)
);
// If the status is Cancelled, freeze Amount, Payment Instrument, Check # and Financial Type.
// Net and Fee Amounts are frozen in AdditionalInfo::buildAdditionalDetail.
if ($this->_id && $this->_values['contribution_status_id'] == array_search('Cancelled', $statusName)) {
if ($totalAmount) {
$totalAmount->freeze();
}
$checkNumber->freeze();
$paymentInstrument->freeze();
$trxnId->freeze();
$financialType->freeze();
}
// if contribution is related to membership or participant freeze Financial Type, Amount
if ($this->_id && isset($this->_values['tax_amount'])) {
$componentDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($this->_id);
if (CRM_Utils_Array::value('membership', $componentDetails) || CRM_Utils_Array::value('participant', $componentDetails)) {
if ($totalAmount) {
$totalAmount->freeze();
}
$financialType->freeze();
$this->assign('freezeFinancialType', TRUE);
}
}
if ($this->_action & CRM_Core_Action::VIEW) {
$this->freeze();
}
}
/**
* Global form rule.
*
* @param array $fields
* The input form values.
* @param array $files
* The uploaded files if any.
* @param $self
*
* @return bool|array
* true if no errors, else array of errors
*/
public static function formRule($fields, $files, $self) {
$errors = array();
// Check for Credit Card Contribution.
if ($self->_mode) {
if (empty($fields['payment_processor_id'])) {
$errors['payment_processor_id'] = ts('Payment Processor is a required field.');
}
else {
// validate payment instrument (e.g. credit card number)
CRM_Core_Payment_Form::validatePaymentInstrument($fields['payment_processor_id'], $fields, $errors, $self);
}
}
// Do the amount validations.
if (empty($fields['total_amount']) && empty($self->_lineItems)) {
if ($priceSetId = CRM_Utils_Array::value('price_set_id', $fields)) {
CRM_Price_BAO_PriceField::priceSetValidation($priceSetId, $fields, $errors);
}
}
$softErrors = CRM_Contribute_Form_SoftCredit::formRule($fields, $errors, $self);
if (!empty($fields['total_amount']) && (!empty($fields['net_amount']) || !empty($fields['fee_amount']))) {
$sum = CRM_Utils_Rule::cleanMoney($fields['net_amount']) + CRM_Utils_Rule::cleanMoney($fields['fee_amount']);
// For taxable contribution we need to deduct taxable amount from
// (net amount + fee amount) before comparing it with total amount
if (!empty($self->_values['tax_amount'])) {
$componentDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($self->_id);
if (!(CRM_Utils_Array::value('membership', $componentDetails) ||
CRM_Utils_Array::value('participant', $componentDetails))
) {
$sum = CRM_Utils_Money::format($sum - $self->_values['tax_amount'], NULL, '%a');
}
}
if (CRM_Utils_Rule::cleanMoney($fields['total_amount']) != $sum) {
$errors['total_amount'] = ts('The sum of fee amount and net amount must be equal to total amount');
}
}
// Form rule for status http://wiki.civicrm.org/confluence/display/CRM/CiviAccounts+4.3+Data+Flow
if ($self->_id && $self->_values['contribution_status_id'] != $fields['contribution_status_id']) {
CRM_Contribute_BAO_Contribution::checkStatusValidation($self->_values, $fields, $errors);
}
// CRM-16015, add form-rule to restrict change of financial type if using price field of different financial type
if ($self->_id && $self->_values['financial_type_id'] != $fields['financial_type_id']) {
CRM_Contribute_BAO_Contribution::checkFinancialTypeChange(NULL, $self->_id, $errors);
}
//FIXME FOR NEW DATA FLOW http://wiki.civicrm.org/confluence/display/CRM/CiviAccounts+4.3+Data+Flow
if (!empty($fields['fee_amount']) && !empty($fields['financial_type_id']) && $financialType = CRM_Contribute_BAO_Contribution::validateFinancialType($fields['financial_type_id'])) {
$errors['financial_type_id'] = ts("Financial Account of account relationship of 'Expense Account is' is not configured for Financial Type : ") . $financialType;
}
// $trxn_id must be unique CRM-13919
if (!empty($fields['trxn_id'])) {
$queryParams = array(1 => array($fields['trxn_id'], 'String'));
$query = 'select count(*) from civicrm_contribution where trxn_id = %1';
if ($self->_id) {
$queryParams[2] = array((int) $self->_id, 'Integer');
$query .= ' and id !=%2';
}
$tCnt = CRM_Core_DAO::singleValueQuery($query, $queryParams);
if ($tCnt) {
$errors['trxn_id'] = ts('Transaction ID\'s must be unique. Transaction \'%1\' already exists in your database.', array(1 => $fields['trxn_id']));
}
}
$errors = array_merge($errors, $softErrors);
return $errors;
}
/**
* Process the form submission.
*/
public function postProcess() {
$sendReceipt = $pId = $contribution = $isRelatedId = FALSE;
$softParams = $softIDs = array();
if ($this->_action & CRM_Core_Action::DELETE) {
CRM_Contribute_BAO_Contribution::deleteContribution($this->_id);
CRM_Core_Session::singleton()->replaceUserContext(CRM_Utils_System::url('civicrm/contact/view',
"reset=1&cid={$this->_contactID}&selectedChild=contribute"
));
return;
}
// Get the submitted form values.
$submittedValues = $this->controller->exportValues($this->_name);
if (!empty($submittedValues['price_set_id']) && $this->_action & CRM_Core_Action::UPDATE) {
$line = CRM_Price_BAO_LineItem::getLineItems($this->_id, 'contribution');
$lineID = key($line);
$priceSetId = CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceField', CRM_Utils_Array::value('price_field_id', $line[$lineID]), 'price_set_id');
$quickConfig = CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceSet', $priceSetId, 'is_quick_config');
if ($quickConfig) {
CRM_Price_BAO_LineItem::deleteLineItems($this->_id, 'civicrm_contribution');
}
}
// Process price set and get total amount and line items.
$lineItem = array();
$priceSetId = CRM_Utils_Array::value('price_set_id', $submittedValues);
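// If no price set was submitted and this is a brand new contribution, fall back to the
// built-in 'default_contribution_amount' quick-config price set: load its single field/option
// and overwrite that option's amount with the submitted total so processAmount() below can
// build a line item from it.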
if (empty($priceSetId) && !$this->_id) {
$this->_priceSetId = $priceSetId = CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceSet', 'default_contribution_amount', 'id', 'name');
$this->_priceSet = current(CRM_Price_BAO_PriceSet::getSetDetail($priceSetId));
$fieldID = key($this->_priceSet['fields']);
$fieldValueId = key($this->_priceSet['fields'][$fieldID]['options']);
$this->_priceSet['fields'][$fieldID]['options'][$fieldValueId]['amount'] = $submittedValues['total_amount'];
$submittedValues['price_' . $fieldID] = 1;
}
if ($priceSetId) {
CRM_Price_BAO_PriceSet::processAmount($this->_priceSet['fields'],
$submittedValues, $lineItem[$priceSetId]);
// Unset tax amount for offline 'is_quick_config' contribution.
if ($this->_priceSet['is_quick_config'] &&
!array_key_exists($submittedValues['financial_type_id'], CRM_Core_PseudoConstant::getTaxRates())
) {
unset($submittedValues['tax_amount']);
}
$submittedValues['total_amount'] = CRM_Utils_Array::value('amount', $submittedValues);
}
if ($this->_id) {
if ($this->_compId) {
if ($this->_context == 'participant') {
$pId = $this->_compId;
}
elseif ($this->_context == 'membership') {
$isRelatedId = TRUE;
}
else {
$pId = CRM_Core_DAO::getFieldValue('CRM_Event_DAO_ParticipantPayment', $this->_id, 'participant_id', 'contribution_id');
}
}
else {
$contributionDetails = CRM_Contribute_BAO_Contribution::getComponentDetails($this->_id);
if (array_key_exists('membership', $contributionDetails)) {
$isRelatedId = TRUE;
}
elseif (array_key_exists('participant', $contributionDetails)) {
$pId = $contributionDetails['participant'];
}
}
}
if (!$priceSetId && !empty($submittedValues['total_amount']) && $this->_id) {
// CRM-10117 update the line items for participants.
if ($pId) {
$entityTable = 'participant';
$entityID = $pId;
$isRelatedId = FALSE;
$participantParams = array(
'fee_amount' => $submittedValues['total_amount'],
'id' => $entityID,
);
CRM_Event_BAO_Participant::add($participantParams);
if (empty($this->_lineItems)) {
$this->_lineItems[] = CRM_Price_BAO_LineItem::getLineItems($entityID, 'participant', 1);
}
}
else {
$entityTable = 'contribution';
$entityID = $this->_id;
}
$lineItems = CRM_Price_BAO_LineItem::getLineItems($entityID, $entityTable, NULL, TRUE, $isRelatedId);
foreach (array_keys($lineItems) as $id) {
$lineItems[$id]['id'] = $id;
}
$itemId = key($lineItems);
if ($itemId && !empty($lineItems[$itemId]['price_field_id'])) {
$this->_priceSetId = CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceField', $lineItems[$itemId]['price_field_id'], 'price_set_id');
}
if ($this->_priceSetId && CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceSet', $this->_priceSetId, 'is_quick_config')) {
$lineItems[$itemId]['unit_price'] = $lineItems[$itemId]['line_total'] = CRM_Utils_Rule::cleanMoney(CRM_Utils_Array::value('total_amount', $submittedValues));
// Update line total and total amount with tax on edit.
$financialItemsId = CRM_Core_PseudoConstant::getTaxRates();
if (array_key_exists($submittedValues['financial_type_id'], $financialItemsId)) {
$lineItems[$itemId]['tax_rate'] = $financialItemsId[$submittedValues['financial_type_id']];
}
else {
$lineItems[$itemId]['tax_rate'] = $lineItems[$itemId]['tax_amount'] = "";
$submittedValues['tax_amount'] = 'null';
}
if ($lineItems[$itemId]['tax_rate']) {
$lineItems[$itemId]['tax_amount'] = ($lineItems[$itemId]['tax_rate'] / 100) * $lineItems[$itemId]['line_total'];
$submittedValues['total_amount'] = $lineItems[$itemId]['line_total'] + $lineItems[$itemId]['tax_amount'];
$submittedValues['tax_amount'] = $lineItems[$itemId]['tax_amount'];
}
}
// CRM-10117 update the line items for participants.
if (!empty($lineItems[$itemId]['price_field_id'])) {
$lineItem[$this->_priceSetId] = $lineItems;
}
}
$isQuickConfig = 0;
if ($this->_priceSetId && CRM_Core_DAO::getFieldValue('CRM_Price_DAO_PriceSet', $this->_priceSetId, 'is_quick_config')) {
$isQuickConfig = 1;
}
//CRM-11529 for quick config back office transactions
//when financial_type_id is passed in form, update the
//line items with the financial type selected in form
if ($isQuickConfig && !empty($submittedValues['financial_type_id']) && CRM_Utils_Array::value($this->_priceSetId, $lineItem)
) {
foreach ($lineItem[$this->_priceSetId] as &$values) {
$values['financial_type_id'] = $submittedValues['financial_type_id'];
}
}
if (!isset($submittedValues['total_amount'])) {
$submittedValues['total_amount'] = CRM_Utils_Array::value('total_amount', $this->_values);
}
$this->assign('lineItem', !empty($lineItem) && !$isQuickConfig ? $lineItem : FALSE);
if (!empty($submittedValues['pcp_made_through_id'])) {
$pcp = array();
$fields = array(
'pcp_made_through_id',
'pcp_display_in_roll',
'pcp_roll_nickname',
'pcp_personal_note',
);
foreach ($fields as $f) {
$pcp[$f] = CRM_Utils_Array::value($f, $submittedValues);
}
}
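// array_flip() + array_keys() reduces the submitted soft credit contact ids to their unique
// values, so we can tell whether the soft credit block was effectively left empty on update.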
$isEmpty = array_keys(array_flip($submittedValues['soft_credit_contact_id']));
if ($this->_id && count($isEmpty) == 1 && key($isEmpty) == NULL) {
//Delete existing soft credit records if soft credit list is empty on update
CRM_Contribute_BAO_ContributionSoft::del(array('contribution_id' => $this->_id));
}
else {
//build soft credit params
foreach ($submittedValues['soft_credit_contact_id'] as $key => $val) {
if ($val && $submittedValues['soft_credit_amount'][$key]) {
$softParams[$key]['contact_id'] = $val;
$softParams[$key]['amount'] = CRM_Utils_Rule::cleanMoney($submittedValues['soft_credit_amount'][$key]);
$softParams[$key]['soft_credit_type_id'] = $submittedValues['soft_credit_type'][$key];
if (!empty($submittedValues['soft_credit_id'][$key])) {
$softIDs[] = $softParams[$key]['id'] = $submittedValues['soft_credit_id'][$key];
}
}
}
}
// set the contact, when contact is selected
if (!empty($submittedValues['contact_id'])) {
$this->_contactID = $submittedValues['contact_id'];
}
// Credit Card Contribution.
if ($this->_mode) {
$this->processCreditCard($submittedValues, $lineItem);
}
else {
// Offline Contribution.
$submittedValues = $this->unsetCreditCardFields($submittedValues);
// get the required field value only.
$formValues = $submittedValues;
$params = $ids = array();
$params['contact_id'] = $this->_contactID;
$params['currency'] = $this->getCurrency($submittedValues);
$fields = array(
'financial_type_id',
'contribution_status_id',
'payment_instrument_id',
'cancel_reason',
'source',
'check_number',
);
foreach ($fields as $f) {
$params[$f] = CRM_Utils_Array::value($f, $formValues);
}
if (!empty($pcp)) {
$params['pcp'] = $pcp;
}
if (!empty($softParams)) {
$params['soft_credit'] = $softParams;
$params['soft_credit_ids'] = $softIDs;
}
// CRM-5740 if priceset is used, no need to cleanup money.
if ($priceSetId) {
$params['skipCleanMoney'] = 1;
}
$dates = array(
'receive_date',
'receipt_date',
'cancel_date',
);
foreach ($dates as $d) {
$params[$d] = CRM_Utils_Date::processDate($formValues[$d], $formValues[$d . '_time'], TRUE);
}
if (!empty($formValues['is_email_receipt'])) {
$params['receipt_date'] = date("Y-m-d");
}
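// When the status is Cancelled or Refunded make sure a cancel date is recorded (defaulting
// to today); otherwise clear any previously stored cancel date and reason.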
if ($params['contribution_status_id'] == CRM_Core_OptionGroup::getValue('contribution_status', 'Cancelled', 'name')
|| $params['contribution_status_id'] == CRM_Core_OptionGroup::getValue('contribution_status', 'Refunded', 'name')
) {
if (CRM_Utils_System::isNull(CRM_Utils_Array::value('cancel_date', $params))) {
$params['cancel_date'] = date('Y-m-d');
}
}
else {
$params['cancel_date'] = $params['cancel_reason'] = 'null';
}
// Set the is_pay_later flag for back-office offline Pending status contributions, CRM-8996;
// if the contribution_status is changed to Completed the is_pay_later flag is reset to 0, CRM-15041.
if ($params['contribution_status_id'] == CRM_Core_OptionGroup::getValue('contribution_status', 'Pending', 'name')) {
$params['is_pay_later'] = 1;
}
elseif ($params['contribution_status_id'] == CRM_Core_OptionGroup::getValue('contribution_status', 'Completed', 'name')) {
$params['is_pay_later'] = 0;
}
$ids['contribution'] = $params['id'] = $this->_id;
// Add Additional common information to formatted params.
CRM_Contribute_Form_AdditionalInfo::postProcessCommon($formValues, $params, $this);
if ($pId) {
$params['contribution_mode'] = 'participant';
$params['participant_id'] = $pId;
$params['skipLineItem'] = 1;
}
elseif ($isRelatedId) {
$params['contribution_mode'] = 'membership';
}
$params['line_item'] = $lineItem;
$params['payment_processor_id'] = $params['payment_processor'] = CRM_Utils_Array::value('id', $this->_paymentProcessor);
if (isset($submittedValues['tax_amount'])) {
$params['tax_amount'] = $submittedValues['tax_amount'];
}
//create contribution.
if ($isQuickConfig) {
$params['is_quick_config'] = 1;
}
// CRM-11956
// If non_deductible_amount exists (i.e. the Additional Details field set was opened and staff entered a value), use it as submitted.
// If non_deductible_amount does NOT exist, calculate it based on
// $contributionType->is_deductible and whether there is a product (premium).
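// Illustrative example (hypothetical figures): for a deductible financial type with a premium
// priced at 80, a total_amount of 50 gives non_deductible_amount = 50 (the whole contribution),
// while a total_amount of 100 gives non_deductible_amount = 80 (the premium price).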
if (empty($params['non_deductible_amount'])) {
$contributionType = new CRM_Financial_DAO_FinancialType();
$contributionType->id = $params['financial_type_id'];
if (!$contributionType->find(TRUE)) {
CRM_Core_Error::fatal('Could not find a system table');
}
if ($contributionType->is_deductible) {
if (isset($formValues['product_name'][0])) {
$selectProduct = $formValues['product_name'][0];
}
// if there is a product - compare the value to the contribution amount
if (isset($selectProduct)) {
$productDAO = new CRM_Contribute_DAO_Product();
$productDAO->id = $selectProduct;
$productDAO->find(TRUE);
// product value exceeds contribution amount
if ($params['total_amount'] < $productDAO->price) {
$params['non_deductible_amount'] = $params['total_amount'];
}
// product value does NOT exceed contribution amount
else {
$params['non_deductible_amount'] = $productDAO->price;
}
}
// contribution is deductible - but there is no product
else {
$params['non_deductible_amount'] = '0.00';
}
}
// contribution is NOT deductible
else {
$params['non_deductible_amount'] = $params['total_amount'];
}
}
$contribution = CRM_Contribute_BAO_Contribution::create($params, $ids);
// process associated membership / participant, CRM-4395
$relatedComponentStatusMsg = NULL;
if ($contribution->id && $this->_action & CRM_Core_Action::UPDATE) {
$relatedComponentStatusMsg = $this->updateRelatedComponent($contribution->id,
$contribution->contribution_status_id,
CRM_Utils_Array::value('contribution_status_id',
$this->_values
),
$contribution->receive_date
);
}
//process note
if ($contribution->id && isset($formValues['note'])) {
CRM_Contribute_Form_AdditionalInfo::processNote($formValues, $this->_contactID, $contribution->id, $this->_noteID);
}
//process premium
if ($contribution->id && isset($formValues['product_name'][0])) {
CRM_Contribute_Form_AdditionalInfo::processPremium($formValues, $contribution->id,
$this->_premiumID, $this->_options
);
}
// assign tax calculation for contribution receipts
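// $taxRate is built up keyed by tax rate, holding the summed tax per rate,
// e.g. (illustrative figures only) array('10.00' => 2.50, '20.00' => 8.00).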
$taxRate = array();
$getTaxDetails = FALSE;
$invoiceSettings = CRM_Core_BAO_Setting::getItem(CRM_Core_BAO_Setting::CONTRIBUTE_PREFERENCES_NAME, 'contribution_invoice_settings');
$invoicing = CRM_Utils_Array::value('invoicing', $invoiceSettings);
if ($invoicing) {
if ($this->_action & CRM_Core_Action::ADD) {
$line = $lineItem;
}
elseif ($this->_action & CRM_Core_Action::UPDATE) {
$line = $this->_lineItems;
}
foreach ($line as $key => $value) {
foreach ($value as $v) {
if (isset($taxRate[(string) CRM_Utils_Array::value('tax_rate', $v)])) {
$taxRate[(string) $v['tax_rate']] = $taxRate[(string) $v['tax_rate']] + CRM_Utils_Array::value('tax_amount', $v);
}
else {
if (isset($v['tax_rate'])) {
$taxRate[(string) $v['tax_rate']] = CRM_Utils_Array::value('tax_amount', $v);
$getTaxDetails = TRUE;
}
}
}
}
}
if ($invoicing) {
if ($this->_action & CRM_Core_Action::UPDATE) {
if (isset($submittedValues['tax_amount'])) {
$totalTaxAmount = $submittedValues['tax_amount'];
}
else {
$totalTaxAmount = $this->_values['tax_amount'];
}
$this->assign('totalTaxAmount', $totalTaxAmount);
$this->assign('dataArray', $taxRate);
}
else {
if (!empty($submittedValues['price_set_id'])) {
$this->assign('totalTaxAmount', $submittedValues['tax_amount']);
$this->assign('getTaxDetails', $getTaxDetails);
$this->assign('dataArray', $taxRate);
$this->assign('taxTerm', CRM_Utils_Array::value('tax_term', $invoiceSettings));
}
else {
$this->assign('totalTaxAmount', CRM_Utils_Array::value('tax_amount', $submittedValues));
}
}
}
//send receipt mail.
if ($contribution->id && !empty($formValues['is_email_receipt'])) {
$formValues['contact_id'] = $this->_contactID;
$formValues['contribution_id'] = $contribution->id;
$formValues += CRM_Contribute_BAO_ContributionSoft::getSoftContribution($contribution->id);
// to get 'from email id' for send receipt
$this->fromEmailId = $formValues['from_email_address'];
$sendReceipt = CRM_Contribute_Form_AdditionalInfo::emailReceipt($this, $formValues);
}
$pledgePaymentId = CRM_Core_DAO::getFieldValue('CRM_Pledge_DAO_PledgePayment',
$contribution->id,
'id',
'contribution_id'
);
//update pledge payment status.
if ((($this->_ppID && $contribution->id) && $this->_action & CRM_Core_Action::ADD) ||
(($pledgePaymentId) && $this->_action & CRM_Core_Action::UPDATE)
) {
if ($this->_ppID) {
//store contribution id in payment record.
CRM_Core_DAO::setFieldValue('CRM_Pledge_DAO_PledgePayment', $this->_ppID, 'contribution_id', $contribution->id);
}
else {
$this->_ppID = CRM_Core_DAO::getFieldValue('CRM_Pledge_DAO_PledgePayment',
$contribution->id,
'id',
'contribution_id'
);
$this->_pledgeID = CRM_Core_DAO::getFieldValue('CRM_Pledge_DAO_PledgePayment',
$contribution->id,
'pledge_id',
'contribution_id'
);
}
$adjustTotalAmount = FALSE;
if (CRM_Utils_Array::value('option_type', $formValues) == 2) {
$adjustTotalAmount = TRUE;
}
$updatePledgePaymentStatus = FALSE;
//do only if either the status or the amount has changed
if ($this->_action & CRM_Core_Action::ADD) {
$updatePledgePaymentStatus = TRUE;
}
elseif ($this->_action & CRM_Core_Action::UPDATE && (($this->_defaults['contribution_status_id'] != $formValues['contribution_status_id']) ||
($this->_defaults['total_amount'] != $formValues['total_amount']))
) {
$updatePledgePaymentStatus = TRUE;
}
if ($updatePledgePaymentStatus) {
CRM_Pledge_BAO_PledgePayment::updatePledgePaymentStatus($this->_pledgeID,
array($this->_ppID),
$contribution->contribution_status_id,
NULL,
$contribution->total_amount,
$adjustTotalAmount
);
}
}
$statusMsg = ts('The contribution record has been saved.');
if (!empty($formValues['is_email_receipt']) && $sendReceipt) {
$statusMsg .= ' ' . ts('A receipt has been emailed to the contributor.');
}
if ($relatedComponentStatusMsg) {
$statusMsg .= ' ' . $relatedComponentStatusMsg;
}
CRM_Core_Session::setStatus($statusMsg, ts('Saved'), 'success');
//Offline Contribution ends.
}
$session = CRM_Core_Session::singleton();
$buttonName = $this->controller->getButtonName();
if ($this->_context == 'standalone') {
if ($buttonName == $this->getButtonName('upload', 'new')) {
$session->replaceUserContext(CRM_Utils_System::url('civicrm/contribute/add',
'reset=1&action=add&context=standalone'
));
}
else {
$session->replaceUserContext(CRM_Utils_System::url('civicrm/contact/view',
"reset=1&cid={$this->_contactID}&selectedChild=contribute"
));
}
}
elseif ($this->_context == 'contribution' && $this->_mode && $buttonName == $this->getButtonName('upload', 'new')) {
$session->replaceUserContext(CRM_Utils_System::url('civicrm/contact/view/contribution',
"reset=1&action=add&context={$this->_context}&cid={$this->_contactID}&mode={$this->_mode}"
));
}
elseif ($buttonName == $this->getButtonName('upload', 'new')) {
$session->replaceUserContext(CRM_Utils_System::url('civicrm/contact/view/contribution',
"reset=1&action=add&context={$this->_context}&cid={$this->_contactID}"
));
}
//store contribution ID if not yet set (on create)
if (empty($this->_id) && !empty($contribution->id)) {
$this->_id = $contribution->id;
}
}
/**
* Process credit card payment.
*
* @param array $submittedValues
* @param array $lineItem
*
* @throws CRM_Core_Exception
*/
protected function processCreditCard($submittedValues, $lineItem) {
$sendReceipt = $contribution = FALSE;
$unsetParams = array(
'trxn_id',
'payment_instrument_id',
'contribution_status_id',
'cancel_date',
'cancel_reason',
);
foreach ($unsetParams as $key) {
if (isset($submittedValues[$key])) {
unset($submittedValues[$key]);
}
}
$isTest = ($this->_mode == 'test') ? 1 : 0;
// CRM-12680 set $_lineItem if its not set
if (empty($this->_lineItem) && !empty($lineItem)) {
$this->_lineItem = $lineItem;
}
// Get the required field values only.
$params = $this->_params = $submittedValues;
$this->_paymentProcessor = CRM_Financial_BAO_PaymentProcessor::getPayment($this->_params['payment_processor_id'],
$this->_mode
);
// Get the payment processor id as per mode.
$this->_params['payment_processor'] = $params['payment_processor_id']
= $this->_params['payment_processor_id'] = $submittedValues['payment_processor_id'] = $this->_paymentProcessor['id'];
$now = date('YmdHis');
$fields = array();
// we need to retrieve email address
if ($this->_context == 'standalone' && !empty($submittedValues['is_email_receipt'])) {
list($this->userDisplayName,
$this->userEmail
) = CRM_Contact_BAO_Contact_Location::getEmailDetails($this->_contactID);
$this->assign('displayName', $this->userDisplayName);
}
// Set email for primary location.
$fields['email-Primary'] = 1;
$params['email-Primary'] = $this->userEmail;
// now set the values for the billing location.
foreach (array_keys($this->_fields) as $name) {
$fields[$name] = 1;
}
// also add location name to the array
$params["address_name-{$this->_bltID}"] = CRM_Utils_Array::value('billing_first_name', $params) . ' ' . CRM_Utils_Array::value('billing_middle_name', $params) . ' ' . CRM_Utils_Array::value('billing_last_name', $params);
$params["address_name-{$this->_bltID}"] = trim($params["address_name-{$this->_bltID}"]);
$fields["address_name-{$this->_bltID}"] = 1;
$ctype = CRM_Core_DAO::getFieldValue('CRM_Contact_DAO_Contact',
$this->_contactID,
'contact_type'
);
$nameFields = array('first_name', 'middle_name', 'last_name');
foreach ($nameFields as $name) {
$fields[$name] = 1;
if (array_key_exists("billing_$name", $params)) {
$params[$name] = $params["billing_{$name}"];
$params['preserveDBName'] = TRUE;
}
}
if (!empty($params['source'])) {
unset($params['source']);
}
$contactID = CRM_Contact_BAO_Contact::createProfileContact($params, $fields,
$this->_contactID,
NULL, NULL,
$ctype
);
// add all the additional payment params we need
if (!empty($this->_params["billing_state_province_id-{$this->_bltID}"])) {
$this->_params["state_province-{$this->_bltID}"] = $this->_params["billing_state_province-{$this->_bltID}"] = CRM_Core_PseudoConstant::stateProvinceAbbreviation($this->_params["billing_state_province_id-{$this->_bltID}"]);
}
if (!empty($this->_params["billing_country_id-{$this->_bltID}"])) {
$this->_params["country-{$this->_bltID}"] = $this->_params["billing_country-{$this->_bltID}"] = CRM_Core_PseudoConstant::countryIsoCode($this->_params["billing_country_id-{$this->_bltID}"]);
}
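// Legacy payment forms do not register $this->_paymentFields, so for credit-card type
// processors fall back to always converting the credit card expiry date below.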
$legacyCreditCardExpiryCheck = FALSE;
if ($this->_paymentProcessor['payment_type'] & CRM_Core_Payment::PAYMENT_TYPE_CREDIT_CARD && !isset($this->_paymentFields)) {
$legacyCreditCardExpiryCheck = TRUE;
}
if ($legacyCreditCardExpiryCheck || in_array('credit_card_exp_date', array_keys($this->_paymentFields))) {
$this->_params['year'] = CRM_Core_Payment_Form::getCreditCardExpirationYear($this->_params);
$this->_params['month'] = CRM_Core_Payment_Form::getCreditCardExpirationMonth($this->_params);
}
$this->_params['ip_address'] = CRM_Utils_System::ipAddress();
$this->_params['amount'] = $this->_params['total_amount'];
$this->_params['amount_level'] = 0;
$this->_params['description'] = ts('Office Credit Card contribution');
$this->_params['currencyID'] = CRM_Utils_Array::value('currency',
$this->_params,
CRM_Core_Config::singleton()->defaultCurrency
);
$this->_params['payment_action'] = 'Sale';
if (!empty($this->_params['receive_date'])) {
$this->_params['receive_date'] = CRM_Utils_Date::processDate($this->_params['receive_date'], $this->_params['receive_date_time']);
}
if (!empty($params['soft_credit_to'])) {
$this->_params['soft_credit_to'] = $params['soft_credit_to'];
$this->_params['pcp_made_through_id'] = $params['pcp_made_through_id'];
}
$this->_params['pcp_display_in_roll'] = CRM_Utils_Array::value('pcp_display_in_roll', $params);
$this->_params['pcp_roll_nickname'] = CRM_Utils_Array::value('pcp_roll_nickname', $params);
$this->_params['pcp_personal_note'] = CRM_Utils_Array::value('pcp_personal_note', $params);
//Add common data to formatted params
CRM_Contribute_Form_AdditionalInfo::postProcessCommon($params, $this->_params, $this);
if (empty($this->_params['invoice_id'])) {
$this->_params['invoiceID'] = md5(uniqid(rand(), TRUE));
}
else {
$this->_params['invoiceID'] = $this->_params['invoice_id'];
}
// At this point we've created a contact and stored its address etc.
// All the payment processors expect the name and address fields to be in the
// payment params, so we copy the billing values over to first_name etc.
$paymentParams = $this->_params;
$paymentParams['contactID'] = $this->_contactID;
CRM_Core_Payment_Form::mapParams($this->_bltID, $this->_params, $paymentParams, TRUE);
$contributionType = new CRM_Financial_DAO_FinancialType();
$contributionType->id = $params['financial_type_id'];
// Add some financial type details to the params list
// if folks need to use it.
$paymentParams['contributionType_name'] = $this->_params['contributionType_name'] = $contributionType->name;
$paymentParams['contributionPageID'] = NULL;
if (!empty($this->_params['is_email_receipt'])) {
$paymentParams['email'] = $this->userEmail;
$paymentParams['is_email_receipt'] = 1;
}
else {
$paymentParams['is_email_receipt'] = 0;
$this->_params['is_email_receipt'] = 0;
}
if (!empty($this->_params['receive_date'])) {
$paymentParams['receive_date'] = $this->_params['receive_date'];
}
// For a recurring contribution, create the Contribution record first;
// the Contribution ID, Recurring ID and Contact ID are needed
// when we get a callback from the payment processor, CRM-7115.
if (!empty($paymentParams['is_recur'])) {
$contribution = CRM_Contribute_Form_Contribution_Confirm::processContribution($this,
$this->_params,
NULL,
$this->_contactID,
$contributionType,
TRUE,
FALSE,
$isTest,
$this->_lineItem
);
$paymentParams['contributionID'] = $contribution->id;
$paymentParams['contributionTypeID'] = $contribution->financial_type_id;
$paymentParams['contributionPageID'] = $contribution->contribution_page_id;
$paymentParams['contributionRecurID'] = $contribution->contribution_recur_id;
}
$result = array();
if ($paymentParams['amount'] > 0.0) {
// force a re-get of the payment processor in case the form changed it, CRM-7179
$payment = CRM_Core_Payment::singleton($this->_mode, $this->_paymentProcessor, $this, TRUE);
try {
$result = $payment->doPayment($paymentParams, 'contribute');
}
catch (CRM_Core_Exception $e) {
$message = ts("Payment Processor Error message") . $e->getMessage();
$this->cleanupDBAfterPaymentFailure($paymentParams, $message);
// Set the contribution mode.
$urlParams = "action=add&cid={$this->_contactID}";
if ($this->_mode) {
$urlParams .= "&mode={$this->_mode}";
}
if (!empty($this->_ppID)) {
$urlParams .= "&context=pledge&ppid={$this->_ppID}";
}
CRM_Core_Error::statusBounce($message, $urlParams, ts('Payment Processor Error'));
}
}
$this->_params = array_merge($this->_params, $result);
$this->_params['receive_date'] = $now;
if (!empty($this->_params['is_email_receipt'])) {
$this->_params['receipt_date'] = $now;
}
else {
$this->_params['receipt_date'] = CRM_Utils_Date::processDate($this->_params['receipt_date'],
$params['receipt_date_time'], TRUE
);
}
$this->set('params', $this->_params);
$this->assign('trxn_id', $result['trxn_id']);
$this->assign('receive_date', $this->_params['receive_date']);
// The result has all the data we need,
// let's archive it to a financial transaction.
if ($contributionType->is_deductible) {
$this->assign('is_deductible', TRUE);
$this->set('is_deductible', TRUE);
}
// Set source if not set
if (empty($this->_params['source'])) {
$userID = CRM_Core_Session::singleton()->get('userID');
$userSortName = CRM_Core_DAO::getFieldValue('CRM_Contact_DAO_Contact', $userID,
'sort_name'
);
$this->_params['source'] = ts('Submit Credit Card Payment by: %1', array(1 => $userSortName));
}
// Build custom data getFields array
$customFieldsContributionType = CRM_Core_BAO_CustomField::getFields('Contribution', FALSE, FALSE,
CRM_Utils_Array::value('financial_type_id', $params)
);
$customFields = CRM_Utils_Array::crmArrayMerge($customFieldsContributionType,
CRM_Core_BAO_CustomField::getFields('Contribution', FALSE, FALSE, NULL, NULL, TRUE)
);
$params['custom'] = CRM_Core_BAO_CustomField::postProcess($params,
$customFields,
$this->_id,
'Contribution'
);
if (empty($paymentParams['is_recur'])) {
$contribution = CRM_Contribute_Form_Contribution_Confirm::processContribution($this,
$this->_params,
$result,
$this->_contactID,
$contributionType,
FALSE, FALSE,
$isTest,
$this->_lineItem
);
}
// Send receipt mail.
if ($contribution->id && !empty($this->_params['is_email_receipt'])) {
$this->_params['trxn_id'] = CRM_Utils_Array::value('trxn_id', $result);
$this->_params['contact_id'] = $this->_contactID;
$this->_params['contribution_id'] = $contribution->id;
$sendReceipt = CRM_Contribute_Form_AdditionalInfo::emailReceipt($this, $this->_params, TRUE);
}
//process the note
if ($contribution->id && isset($params['note'])) {
CRM_Contribute_Form_AdditionalInfo::processNote($params, $contactID, $contribution->id, NULL);
}
//process premium
if ($contribution->id && isset($params['product_name'][0])) {
CRM_Contribute_Form_AdditionalInfo::processPremium($params, $contribution->id, NULL, $this->_options);
}
//update pledge payment status.
if ($this->_ppID && $contribution->id) {
// Store contribution id in payment record.
CRM_Core_DAO::setFieldValue('CRM_Pledge_DAO_PledgePayment', $this->_ppID, 'contribution_id', $contribution->id);
CRM_Pledge_BAO_PledgePayment::updatePledgePaymentStatus($this->_pledgeID,
array($this->_ppID),
$contribution->contribution_status_id,
NULL,
$contribution->total_amount
);
}
if ($contribution->id) {
$statusMsg = ts('The contribution record has been processed.');
if (!empty($this->_params['is_email_receipt']) && $sendReceipt) {
$statusMsg .= ' ' . ts('A receipt has been emailed to the contributor.');
}
CRM_Core_Session::setStatus($statusMsg, ts('Complete'), 'success');
}
}
/**
* Clean up DB after payment fails.
*
* This function removes related DB entries. Note that it has been agreed in principle,
* but not implemented, that contributions should be retained as 'Failed' rather than
* deleted.
*
* @todo it doesn't clean up line items.
*
* @param array $paymentParams
* @param string $message
*/
public function cleanupDBAfterPaymentFailure($paymentParams, $message) {
// Make sure to cleanup db for recurring case.
if (!empty($paymentParams['contributionID'])) {
CRM_Core_Error::debug_log_message($message .
"contact id={$this->_contactID} (deleting contribution {$paymentParams['contributionID']}");
CRM_Contribute_BAO_Contribution::deleteContribution($paymentParams['contributionID']);
}
if (!empty($paymentParams['contributionRecurID'])) {
CRM_Core_Error::debug_log_message($message .
"contact id={$this->_contactID} (deleting recurring contribution {$paymentParams['contributionRecurID']}");
CRM_Contribute_BAO_ContributionRecur::deleteRecurContribution($paymentParams['contributionRecurID']);
}
}
/**
* Generate the data to construct a snippet based pane.
*
* This form also assigns the showAdditionalInfo var based on historical code.
* This appears to mean 'there is a pane to show'.
*
* @param string $type
* Type of Pane - this is generally used to determine the function name used to build it
* - e.g CreditCard, AdditionalDetail
* @param array $defaults
*
* @return array
* We aim to further refactor & simplify this but currently
* - the panes array
* - should additional info be shown?
*/
protected function generatePane($type, $defaults) {
$urlParams = "snippet=4&formType={$type}";
if ($this->_mode) {
$urlParams .= "&mode={$this->_mode}";
}
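// Payment panes (CreditCard / DirectDebit) default to open; all other panes start collapsed
// unless pane-specific data was submitted or defaulted (checked further below).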
$open = 'false';
if ($type == 'CreditCard' ||
$type == 'DirectDebit'
) {
$open = 'true';
}
$pane = array(
'url' => CRM_Utils_System::url('civicrm/contact/view/contribution', $urlParams),
'open' => $open,
'id' => $type,
);
// See if we need to include this paneName in the current form.
if ($this->_formType == $type || !empty($_POST["hidden_{$type}"]) ||
CRM_Utils_Array::value("hidden_{$type}", $defaults)
) {
$this->assign('showAdditionalInfo', TRUE);
$pane['open'] = 'true';
}
if ($type == 'CreditCard' || $type == 'DirectDebit') {
// @todo would be good to align tpl name with form name...
// @todo document why this hidden variable is required.
$this->add('hidden', 'hidden_' . $type, 1);
return $pane;
}
else {
$additionalInfoFormFunction = 'build' . $type;
CRM_Contribute_Form_AdditionalInfo::$additionalInfoFormFunction($this);
return $pane;
}
}
}
| gpl-2.0 |
tonvinh/ez | vendor/ezsystems/ezpublish-kernel/eZ/Publish/Core/REST/Server/Values/CountryList.php | 681 | <?php
/**
* File containing the CountryList class
*
* @copyright Copyright (C) eZ Systems AS. All rights reserved.
* @license For full copyright and license information view LICENSE file distributed with this source code.
* @version 2014.07.0
*/
namespace eZ\Publish\Core\REST\Server\Values;
use eZ\Publish\Core\REST\Common\Value as RestValue;
/**
* Country list view model
*/
class CountryList extends RestValue
{
/**
* @var \eZ\Publish\API\Repository\Values\ContentType\Countries[]
*/
public $countries;
/**
 * Constructor
 *
 * @param \eZ\Publish\API\Repository\Values\ContentType\Countries[] $countries
 */
public function __construct( array $countries )
{
$this->countries = $countries;
}
}
| gpl-2.0 |
digidudeofdw/enigma2 | lib/dvb/scan.cpp | 36702 | #include <lib/dvb/idvb.h>
#include <dvbsi++/descriptor_tag.h>
#include <dvbsi++/service_descriptor.h>
#include <dvbsi++/satellite_delivery_system_descriptor.h>
#include <dvbsi++/terrestrial_delivery_system_descriptor.h>
#include <dvbsi++/cable_delivery_system_descriptor.h>
#include <dvbsi++/ca_identifier_descriptor.h>
#include <dvbsi++/registration_descriptor.h>
#include <lib/dvb/specs.h>
#include <lib/dvb/esection.h>
#include <lib/dvb/scan.h>
#include <lib/dvb/frontend.h>
#include <lib/base/eenv.h>
#include <lib/base/eerror.h>
#include <lib/base/estring.h>
#include <lib/dvb/dvb.h>
#include <lib/dvb/db.h>
#include <lib/python/python.h>
#include <errno.h>
#define SCAN_eDebug(x...) do { if (m_scan_debug) eDebug(x); } while(0)
#define SCAN_eDebugNoNewLine(x...) do { if (m_scan_debug) eDebugNoNewLine(x); } while(0)
DEFINE_REF(eDVBScan);
eDVBScan::eDVBScan(iDVBChannel *channel, bool usePAT, bool debug)
:m_channel(channel), m_channel_state(iDVBChannel::state_idle)
,m_ready(0), m_ready_all(usePAT ? (readySDT|readyPAT) : readySDT)
,m_pmt_running(false), m_abort_current_pmt(false), m_flags(0)
,m_usePAT(usePAT), m_scan_debug(debug), m_show_add_tsid_onid_check_failed_msg(true)
{
if (m_channel->getDemux(m_demux))
SCAN_eDebug("scan: failed to allocate demux!");
m_channel->connectStateChange(slot(*this, &eDVBScan::stateChange), m_stateChanged_connection);
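	// Optionally compile a user-supplied python snippet (scan_tp_valid_check.py) that can
	// override the built-in transponder ONID/TSID validity checks in isValidONIDTSID().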
std::string filename = eEnv::resolve("${sysconfdir}/scan_tp_valid_check.py");
FILE *f = fopen(filename.c_str(), "r");
if (f)
{
char code[16384];
size_t rd = fread(code, 1, 16383, f);
if (rd)
{
code[rd]=0;
m_additional_tsid_onid_check_func = Py_CompileString(code, filename.c_str(), Py_file_input);
}
fclose(f);
}
}
eDVBScan::~eDVBScan()
{
if (m_additional_tsid_onid_check_func)
Py_DECREF(m_additional_tsid_onid_check_func);
}
int eDVBScan::isValidONIDTSID(int orbital_position, eOriginalNetworkID onid, eTransportStreamID tsid)
{
/*
* Assume cable and terrestrial ONIDs/TSIDs are always valid,
* don't check them against the satellite blacklist.
*/
if (orbital_position == 0xFFFF || orbital_position == 0xEEEE) return 1;
int ret;
switch (onid.get())
{
case 0:
case 0x1111:
ret=0;
break;
case 0x13E: // workaround for 11258H and 11470V on hotbird with same ONID/TSID (0x13E/0x578)
ret = orbital_position != 130 || tsid != 0x578;
break;
case 1:
ret = orbital_position == 192;
break;
case 0x00B1:
ret = tsid != 0x00B0;
break;
case 0x00eb:
ret = tsid != 0x4321;
break;
case 0x0002:
ret = abs(orbital_position-282) < 6 && tsid != 2019;
// 12070H and 10936V have same tsid/onid.. but even the same services are provided
break;
case 0x2000:
ret = tsid != 0x1000;
break;
case 0x5E: // Sirius 4.8E 12322V and 12226H
ret = abs(orbital_position-48) < 3 && tsid != 1;
break;
case 10100: // Eutelsat W7 36.0E 11644V and 11652V
ret = orbital_position != 360 || tsid != 10187;
break;
case 42: // Tuerksat 42.0E
ret = orbital_position != 420 || (
tsid != 8 && // 11830V 12729V
tsid != 5 && // 12679V 12685H
tsid != 2 && // 11096V 12015H
tsid != 55); // 11996V 11716V
break;
case 100: // Intelsat 10 68.5E 3808V 3796V 4012V, Amos 4.0W 10723V 11571H
ret = (orbital_position != 685 && orbital_position != 3560) || tsid != 1;
break;
case 70: // Thor 0.8W 11862H 12341V
ret = abs(orbital_position-3592) < 3 && tsid != 46;
break;
case 32: // NSS 806 (40.5W) 4059R, 3774L
ret = orbital_position != 3195 || tsid != 21;
break;
default:
ret = onid.get() < 0xFF00;
break;
}
if (ret && m_additional_tsid_onid_check_func)
{
bool failed = true;
ePyObject dict = PyDict_New();
extern void PutToDict(ePyObject &, const char *, long);
PyDict_SetItemString(dict, "__builtins__", PyEval_GetBuiltins());
PutToDict(dict, "orbpos", orbital_position);
PutToDict(dict, "tsid", tsid.get());
PutToDict(dict, "onid", onid.get());
ePyObject r = PyEval_EvalCode((PyCodeObject*)(PyObject*)m_additional_tsid_onid_check_func, dict, dict);
if (r)
{
ePyObject o = PyDict_GetItemString(dict, "ret");
if (o)
{
if (PyInt_Check(o))
{
ret = PyInt_AsLong(o);
failed = false;
}
}
Py_DECREF(r);
}
if (failed && m_show_add_tsid_onid_check_failed_msg)
{
eDebug("execing /etc/enigma2/scan_tp_valid_check failed!\n"
"usable global variables in scan_tp_valid_check.py are 'orbpos', 'tsid', 'onid'\n"
"the return value must be stored in a global var named 'ret'");
m_show_add_tsid_onid_check_failed_msg=false;
}
Py_DECREF(dict);
}
return ret;
}
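/*
 * Illustrative note (example values assumed): the upper 16 bits of the
 * frontend hash carry the orbital position, so a transponder on 19.2E
 * produces a hash of the form 0x00C0xxxx. When the ONID/TSID pair is
 * trusted, buildNamespace() below masks the lower 16 bits and the whole
 * position shares the namespace 0x00C00000; otherwise the frequency
 * dependent low bits are kept to tell otherwise identical IDs apart.
 */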
eDVBNamespace eDVBScan::buildNamespace(eOriginalNetworkID onid, eTransportStreamID tsid, unsigned long hash)
{
// on valid ONIDs, ignore frequency ("sub network") part
if (isValidONIDTSID((hash >> 16) & 0xFFFF, onid, tsid))
hash &= ~0xFFFF;
return eDVBNamespace(hash);
}
void eDVBScan::stateChange(iDVBChannel *ch)
{
int state;
if (ch->getState(state))
return;
if (m_channel_state == state)
return;
if (state == iDVBChannel::state_ok)
{
startFilter();
m_channel_state = state;
} else if (state == iDVBChannel::state_failed)
{
m_ch_unavailable.push_back(m_ch_current);
nextChannel();
}
/* unavailable will timeout, anyway. */
}
RESULT eDVBScan::nextChannel()
{
ePtr<iDVBFrontend> fe;
m_SDT = 0; m_PAT = 0; m_BAT = 0; m_NIT = 0, m_PMT = 0;
m_ready = 0;
m_pat_tsid = eTransportStreamID();
/* check what we need */
m_ready_all = readySDT;
if (m_flags & scanNetworkSearch)
m_ready_all |= readyNIT;
if (m_flags & scanSearchBAT)
m_ready_all |= readyBAT;
if (m_usePAT)
m_ready_all |= readyPAT;
if (m_ch_toScan.empty())
{
SCAN_eDebug("no channels left to scan.");
SCAN_eDebug("%zd channels scanned, %zd were unavailable.",
m_ch_scanned.size(), m_ch_unavailable.size());
SCAN_eDebug("%zd channels in database.", m_new_channels.size());
m_event(evtFinish);
return -ENOENT;
}
m_ch_current = m_ch_toScan.front();
m_ch_toScan.pop_front();
if (m_channel->getFrontend(fe))
{
m_event(evtFail);
return -ENOTSUP;
}
m_chid_current = eDVBChannelID();
m_channel_state = iDVBChannel::state_idle;
if (fe->tune(*m_ch_current))
return nextChannel();
m_event(evtUpdate);
return 0;
}
RESULT eDVBScan::startFilter()
{
bool startSDT=true;
ASSERT(m_demux);
/* only start the required filters */
if (m_ready_all & readyPAT)
startSDT = m_ready & readyPAT;
// m_ch_current is not set when eDVBScan is just used for an SDT update
if (!m_ch_current)
{
unsigned int channelFlags;
m_channel->getCurrentFrontendParameters(m_ch_current);
m_ch_current->getFlags(channelFlags);
if (channelFlags & iDVBFrontendParameters::flagOnlyFree)
m_flags |= scanOnlyFree;
}
m_SDT = 0;
if (startSDT && (m_ready_all & readySDT))
{
m_SDT = new eTable<ServiceDescriptionSection>;
int tsid=-1;
if (m_ready & readyPAT && m_ready & validPAT)
{
std::vector<ProgramAssociationSection*>::const_iterator i =
m_PAT->getSections().begin();
ASSERT(i != m_PAT->getSections().end());
tsid = (*i)->getTableIdExtension(); // in PAT this is the transport stream id
m_pat_tsid = eTransportStreamID(tsid);
for (; i != m_PAT->getSections().end(); ++i)
{
const ProgramAssociationSection &pat = **i;
ProgramAssociationConstIterator program = pat.getPrograms()->begin();
for (; program != pat.getPrograms()->end(); ++program)
m_pmts_to_read.insert(std::pair<unsigned short, service>((*program)->getProgramNumber(), service((*program)->getProgramMapPid())));
}
m_PMT = new eTable<ProgramMapSection>;
CONNECT(m_PMT->tableReady, eDVBScan::PMTready);
PMTready(-2);
// KabelBW HACK ... on 618 MHz and 626 MHz the transport stream id in PAT and SDT is different
{
int type;
m_ch_current->getSystem(type);
if (type == iDVBFrontend::feCable)
{
eDVBFrontendParametersCable parm;
m_ch_current->getDVBC(parm);
if ((tsid == 0x00d7 && abs(parm.frequency-618000) < 2000) ||
(tsid == 0x00d8 && abs(parm.frequency-626000) < 2000))
tsid = -1;
}
}
}
if (tsid == -1)
{
if (m_SDT->start(m_demux, eDVBSDTSpec()))
return -1;
}
else if (m_SDT->start(m_demux, eDVBSDTSpec(tsid, true)))
return -1;
CONNECT(m_SDT->tableReady, eDVBScan::SDTready);
}
if (!(m_ready & readyPAT))
{
m_PAT = 0;
if (m_ready_all & readyPAT)
{
m_PAT = new eTable<ProgramAssociationSection>;
if (m_PAT->start(m_demux, eDVBPATSpec(4000)))
return -1;
CONNECT(m_PAT->tableReady, eDVBScan::PATready);
}
m_NIT = 0;
if (m_ready_all & readyNIT)
{
m_NIT = new eTable<NetworkInformationSection>;
if (m_NIT->start(m_demux, eDVBNITSpec(m_networkid)))
return -1;
CONNECT(m_NIT->tableReady, eDVBScan::NITready);
}
m_BAT = 0;
if (m_ready_all & readyBAT)
{
m_BAT = new eTable<BouquetAssociationSection>;
if (m_BAT->start(m_demux, eDVBBATSpec()))
return -1;
CONNECT(m_BAT->tableReady, eDVBScan::BATready);
}
}
return 0;
}
void eDVBScan::SDTready(int err)
{
SCAN_eDebug("got sdt %d", err);
m_ready |= readySDT;
if (!err)
m_ready |= validSDT;
channelDone();
}
void eDVBScan::NITready(int err)
{
SCAN_eDebug("got nit, err %d", err);
m_ready |= readyNIT;
if (!err)
m_ready |= validNIT;
channelDone();
}
void eDVBScan::BATready(int err)
{
SCAN_eDebug("got bat");
m_ready |= readyBAT;
if (!err)
m_ready |= validBAT;
channelDone();
}
void eDVBScan::PATready(int err)
{
SCAN_eDebug("got pat");
m_ready |= readyPAT;
if (!err)
m_ready |= validPAT;
startFilter(); // for starting the SDT filter
}
void eDVBScan::PMTready(int err)
{
SCAN_eDebug("got pmt %d", err);
if (!err)
{
bool scrambled = false;
bool have_audio = false;
bool have_video = false;
unsigned short pcrpid = 0xFFFF;
std::vector<ProgramMapSection*>::const_iterator i;
for (i = m_PMT->getSections().begin(); i != m_PMT->getSections().end(); ++i)
{
const ProgramMapSection &pmt = **i;
if (pcrpid == 0xFFFF)
pcrpid = pmt.getPcrPid();
else
SCAN_eDebug("already have a pcrpid %04x %04x", pcrpid, pmt.getPcrPid());
ElementaryStreamInfoConstIterator es;
for (es = pmt.getEsInfo()->begin(); es != pmt.getEsInfo()->end(); ++es)
{
int isaudio = 0, isvideo = 0, is_scrambled = 0, forced_audio = 0, forced_video = 0;
switch ((*es)->getType())
{
case 0x1b: // AVC Video Stream (MPEG4 H264)
case 0x10: // MPEG 4 Part 2
case 0x01: // MPEG 1 video
case 0x02: // MPEG 2 video
isvideo = 1;
forced_video = 1;
//break; fall through !!!
case 0x03: // MPEG 1 audio
case 0x04: // MPEG 2 audio
case 0x0f: // MPEG 2 AAC
case 0x11: // MPEG 4 AAC
if (!isvideo)
{
forced_audio = 1;
isaudio = 1;
}
case 0x06: // PES Private
case 0x81: // user private
case 0xEA: // TS_PSI_ST_SMPTE_VC1
for (DescriptorConstIterator desc = (*es)->getDescriptors()->begin();
desc != (*es)->getDescriptors()->end(); ++desc)
{
uint8_t tag = (*desc)->getTag();
/* PES private can contain AC-3, DTS or lots of other stuff.
check the descriptors to get the exact type. */
if (!forced_video && !forced_audio)
{
switch (tag)
{
case 0x1C: // TS_PSI_DT_MPEG4_Audio
case 0x2B: // TS_PSI_DT_MPEG2_AAC
case AAC_DESCRIPTOR:
case AC3_DESCRIPTOR:
case DTS_DESCRIPTOR:
case AUDIO_STREAM_DESCRIPTOR:
isaudio = 1;
break;
case 0x28: // TS_PSI_DT_AVC
case 0x1B: // TS_PSI_DT_MPEG4_Video
case VIDEO_STREAM_DESCRIPTOR:
isvideo = 1;
break;
case REGISTRATION_DESCRIPTOR: /* some services don't have a separate AC3 descriptor */
{
RegistrationDescriptor *d = (RegistrationDescriptor*)(*desc);
switch (d->getFormatIdentifier())
{
case 0x44545331 ... 0x44545333: // DTS1/DTS2/DTS3
case 0x41432d33: // == 'AC-3'
case 0x42535344: // == 'BSSD' (LPCM)
isaudio = 1;
break;
case 0x56432d31: // == 'VC-1'
isvideo = 1;
break;
default:
break;
}
}
default:
break;
}
}
if (tag == CA_DESCRIPTOR)
is_scrambled = 1;
}
default:
break;
}
if (isvideo)
have_video = true;
else if (isaudio)
have_audio = true;
else
continue;
if (is_scrambled)
scrambled = true;
}
for (DescriptorConstIterator desc = pmt.getDescriptors()->begin();
desc != pmt.getDescriptors()->end(); ++desc)
{
if ((*desc)->getTag() == CA_DESCRIPTOR)
scrambled = true;
}
}
m_pmt_in_progress->second.scrambled = scrambled;
if ( have_video )
m_pmt_in_progress->second.serviceType = 1;
else if ( have_audio )
m_pmt_in_progress->second.serviceType = 2;
else
m_pmt_in_progress->second.serviceType = 100;
}
if (err == -1) // timeout or removed by sdt
m_pmts_to_read.erase(m_pmt_in_progress++);
else if (m_pmt_running)
++m_pmt_in_progress;
else
{
m_pmt_in_progress = m_pmts_to_read.begin();
m_pmt_running = true;
}
if (m_pmt_in_progress != m_pmts_to_read.end())
m_PMT->start(m_demux, eDVBPMTSpec(m_pmt_in_progress->second.pmtPid, m_pmt_in_progress->first, 4000));
else
{
m_PMT = 0;
m_pmt_running = false;
channelDone();
}
}
void eDVBScan::addKnownGoodChannel(const eDVBChannelID &chid, iDVBFrontendParameters *feparm)
{
/* add it to the list of known channels. */
if (chid)
m_new_channels.insert(std::pair<eDVBChannelID,ePtr<iDVBFrontendParameters> >(chid, feparm));
}
void eDVBScan::addChannelToScan(const eDVBChannelID &chid, iDVBFrontendParameters *feparm)
{
/* check if we don't already have that channel ... */
int type;
feparm->getSystem(type);
switch(type)
{
case iDVBFrontend::feSatellite:
{
eDVBFrontendParametersSatellite parm;
feparm->getDVBS(parm);
SCAN_eDebug("try to add %d %d %d %d %d %d",
parm.orbital_position, parm.frequency, parm.symbol_rate, parm.polarisation, parm.fec, parm.modulation);
break;
}
case iDVBFrontend::feCable:
{
eDVBFrontendParametersCable parm;
feparm->getDVBC(parm);
SCAN_eDebug("try to add %d %d %d %d",
parm.frequency, parm.symbol_rate, parm.modulation, parm.fec_inner);
break;
}
case iDVBFrontend::feTerrestrial:
{
eDVBFrontendParametersTerrestrial parm;
feparm->getDVBT(parm);
SCAN_eDebug("try to add %d %d %d %d %d %d %d %d",
parm.frequency, parm.modulation, parm.transmission_mode, parm.hierarchy,
parm.guard_interval, parm.code_rate_LP, parm.code_rate_HP, parm.bandwidth);
break;
}
}
int found_count=0;
/* ... in the list of channels to scan */
for (std::list<ePtr<iDVBFrontendParameters> >::iterator i(m_ch_toScan.begin()); i != m_ch_toScan.end();)
{
if (sameChannel(*i, feparm))
{
if (!found_count)
{
*i = feparm; // update
SCAN_eDebug("update");
}
else
{
SCAN_eDebug("remove dupe");
m_ch_toScan.erase(i++);
continue;
}
++found_count;
}
++i;
}
if (found_count > 0)
{
SCAN_eDebug("already in todo list");
return;
}
/* ... in the list of successfully scanned channels */
for (std::list<ePtr<iDVBFrontendParameters> >::const_iterator i(m_ch_scanned.begin()); i != m_ch_scanned.end(); ++i)
if (sameChannel(*i, feparm))
{
SCAN_eDebug("successfully scanned");
return;
}
/* ... in the list of unavailable channels */
for (std::list<ePtr<iDVBFrontendParameters> >::const_iterator i(m_ch_unavailable.begin()); i != m_ch_unavailable.end(); ++i)
if (sameChannel(*i, feparm, true))
{
SCAN_eDebug("scanned but not available");
return;
}
/* ... on the current channel */
if (sameChannel(m_ch_current, feparm))
{
SCAN_eDebug("is current");
return;
}
SCAN_eDebug("really add");
/* otherwise, add it to the todo list. */
m_ch_toScan.push_front(feparm); // push to the front, better than having the rotor swing wildly from east to west :)
}
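/*
 * Illustrative note (example values assumed): satellite and cable
 * frequencies are stored in kHz in this code, so two list entries at
 * e.g. 11836500 kHz and 11838000 kHz differ by 1500 kHz and are folded
 * into one channel by the 4 MHz tolerance checked below.
 */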
int eDVBScan::sameChannel(iDVBFrontendParameters *ch1, iDVBFrontendParameters *ch2, bool exact) const
{
int diff;
if (ch1->calculateDifference(ch2, diff, exact))
return 0;
if (diff < 4000) // less than 4 MHz difference -> treat as the same channel
return 1;
return 0;
}
void eDVBScan::channelDone()
{
if (m_ready & validSDT && (!(m_flags & scanOnlyFree) || !m_pmt_running))
{
unsigned long hash = 0;
m_ch_current->getHash(hash);
eDVBNamespace dvbnamespace = buildNamespace(
(**m_SDT->getSections().begin()).getOriginalNetworkId(),
(**m_SDT->getSections().begin()).getTransportStreamId(),
hash);
SCAN_eDebug("SDT: ");
std::vector<ServiceDescriptionSection*>::const_iterator i;
for (i = m_SDT->getSections().begin(); i != m_SDT->getSections().end(); ++i)
processSDT(dvbnamespace, **i);
m_ready &= ~validSDT;
}
if (m_ready & validNIT)
{
int system;
std::list<ePtr<iDVBFrontendParameters> > m_ch_toScan_backup;
m_ch_current->getSystem(system);
SCAN_eDebug("dumping NIT");
if (m_flags & clearToScanOnFirstNIT)
{
m_ch_toScan_backup = m_ch_toScan;
m_ch_toScan.clear();
}
std::vector<NetworkInformationSection*>::const_iterator i;
for (i = m_NIT->getSections().begin(); i != m_NIT->getSections().end(); ++i)
{
const TransportStreamInfoList &tsinfovec = *(*i)->getTsInfo();
for (TransportStreamInfoConstIterator tsinfo(tsinfovec.begin());
tsinfo != tsinfovec.end(); ++tsinfo)
{
SCAN_eDebug("TSID: %04x ONID: %04x", (*tsinfo)->getTransportStreamId(),
(*tsinfo)->getOriginalNetworkId());
eOriginalNetworkID onid = (*tsinfo)->getOriginalNetworkId();
eTransportStreamID tsid = (*tsinfo)->getTransportStreamId();
for (DescriptorConstIterator desc = (*tsinfo)->getDescriptors()->begin();
desc != (*tsinfo)->getDescriptors()->end(); ++desc)
{
switch ((*desc)->getTag())
{
case CABLE_DELIVERY_SYSTEM_DESCRIPTOR:
{
if (system != iDVBFrontend::feCable)
break; // when current locked transponder is no cable transponder ignore this descriptor
CableDeliverySystemDescriptor &d = (CableDeliverySystemDescriptor&)**desc;
ePtr<eDVBFrontendParameters> feparm = new eDVBFrontendParameters;
eDVBFrontendParametersCable cable;
cable.set(d);
feparm->setDVBC(cable);
unsigned long hash=0;
feparm->getHash(hash);
eDVBNamespace ns = buildNamespace(onid, tsid, hash);
addChannelToScan(
eDVBChannelID(ns, tsid, onid),
feparm);
break;
}
case TERRESTRIAL_DELIVERY_SYSTEM_DESCRIPTOR:
{
if (system != iDVBFrontend::feTerrestrial)
break; // when current locked transponder is no terrestrial transponder ignore this descriptor
TerrestrialDeliverySystemDescriptor &d = (TerrestrialDeliverySystemDescriptor&)**desc;
ePtr<eDVBFrontendParameters> feparm = new eDVBFrontendParameters;
eDVBFrontendParametersTerrestrial terr;
terr.set(d);
feparm->setDVBT(terr);
unsigned long hash=0;
feparm->getHash(hash);
eDVBNamespace ns = buildNamespace(onid, tsid, hash);
addChannelToScan(
eDVBChannelID(ns, tsid, onid),
feparm);
break;
}
case SATELLITE_DELIVERY_SYSTEM_DESCRIPTOR:
{
if (system != iDVBFrontend::feSatellite)
break; // when current locked transponder is no satellite transponder ignore this descriptor
SatelliteDeliverySystemDescriptor &d = (SatelliteDeliverySystemDescriptor&)**desc;
if (d.getFrequency() < 10000)
break;
ePtr<eDVBFrontendParameters> feparm = new eDVBFrontendParameters;
eDVBFrontendParametersSatellite sat;
sat.set(d);
eDVBFrontendParametersSatellite p;
m_ch_current->getDVBS(p);
if ( abs(p.orbital_position - sat.orbital_position) < 5 )
sat.orbital_position = p.orbital_position;
if ( abs(abs(3600 - p.orbital_position) - sat.orbital_position) < 5 )
{
SCAN_eDebug("found transponder with incorrect west/east flag ... correct this");
sat.orbital_position = p.orbital_position;
}
feparm->setDVBS(sat);
if ( p.orbital_position != sat.orbital_position)
SCAN_eDebug("dropping this transponder, it's on another satellite.");
else
{
unsigned long hash=0;
feparm->getHash(hash);
addChannelToScan(
eDVBChannelID(buildNamespace(onid, tsid, hash), tsid, onid),
feparm);
}
break;
}
default:
SCAN_eDebug("descr<%x>", (*desc)->getTag());
break;
}
}
}
}
/* a pitfall is to have the clearToScanOnFirstNIT-flag set, and having channels which have
no or invalid NIT. this code will not erase the toScan list unless at least one valid entry
has been found.
This is not a perfect solution, as the channel could contain a partial NIT. Life's bad.
*/
if (m_flags & clearToScanOnFirstNIT)
{
if (m_ch_toScan.empty())
{
eWarning("clearToScanOnFirstNIT was set, but NIT is invalid. Refusing to stop scan.");
m_ch_toScan = m_ch_toScan_backup;
} else
m_flags &= ~clearToScanOnFirstNIT;
}
m_ready &= ~validNIT;
}
if (m_pmt_running || (m_ready & m_ready_all) != m_ready_all)
{
if (m_abort_current_pmt)
{
m_abort_current_pmt = false;
PMTready(-1);
}
return;
}
SCAN_eDebug("channel done!");
/* if we had services on this channel, we declare
this channel as "known good". add it.
(TODO: not yet implemented)
a NIT entry could have possibly overridden
our frontend data with more exact data.
(TODO: not yet implemented)
the tuning process could have led to more
exact data than the user entered.
The channel id was probably corrected
by the data written in the SDT. this is
important, as "initial transponder lists"
usually don't have valid CHIDs (and that's
good).
These are the reasons for adding the transponder
here, and not before.
*/
int type;
if (m_ch_current->getSystem(type))
type = -1;
for (m_pmt_in_progress = m_pmts_to_read.begin(); m_pmt_in_progress != m_pmts_to_read.end();)
{
eServiceReferenceDVB ref;
ePtr<eDVBService> service = new eDVBService;
if (!m_chid_current)
{
unsigned long hash = 0;
m_ch_current->getHash(hash);
m_chid_current = eDVBChannelID(
buildNamespace(eOriginalNetworkID(0), m_pat_tsid, hash),
m_pat_tsid, eOriginalNetworkID(0));
}
if (m_pmt_in_progress->second.serviceType == 1)
SCAN_eDebug("SID %04x is VIDEO", m_pmt_in_progress->first);
else if (m_pmt_in_progress->second.serviceType == 2)
SCAN_eDebug("SID %04x is AUDIO", m_pmt_in_progress->first);
else
SCAN_eDebug("SID %04x is DATA", m_pmt_in_progress->first);
ref.set(m_chid_current);
ref.setServiceID(m_pmt_in_progress->first);
ref.setServiceType(m_pmt_in_progress->second.serviceType);
if (type != -1)
{
char sname[255];
char pname[255];
memset(pname, 0, sizeof(pname));
memset(sname, 0, sizeof(sname));
switch(type)
{
case iDVBFrontend::feSatellite:
{
eDVBFrontendParametersSatellite parm;
m_ch_current->getDVBS(parm);
snprintf(sname, 255, "%d%c SID 0x%02x",
parm.frequency/1000,
parm.polarisation ? 'V' : 'H',
m_pmt_in_progress->first);
snprintf(pname, 255, "%s %s %d%c %d.%d°%c",
parm.system ? "DVB-S2" : "DVB-S",
parm.modulation == 1 ? "QPSK" : "8PSK",
parm.frequency/1000,
parm.polarisation ? 'V' : 'H',
parm.orbital_position/10,
parm.orbital_position%10,
parm.orbital_position > 0 ? 'E' : 'W');
break;
}
case iDVBFrontend::feTerrestrial:
{
eDVBFrontendParametersTerrestrial parm;
m_ch_current->getDVBT(parm);
snprintf(sname, 255, "%d SID 0x%02x",
parm.frequency/1000,
m_pmt_in_progress->first);
break;
}
case iDVBFrontend::feCable:
{
eDVBFrontendParametersCable parm;
m_ch_current->getDVBC(parm);
snprintf(sname, 255, "%d SID 0x%02x",
parm.frequency/1000,
m_pmt_in_progress->first);
break;
}
}
SCAN_eDebug("name '%s', provider_name '%s'", sname, pname);
service->m_service_name = convertDVBUTF8(sname);
service->genSortName();
service->m_provider_name = convertDVBUTF8(pname);
}
if (!(m_flags & scanOnlyFree) || !m_pmt_in_progress->second.scrambled) {
SCAN_eDebug("add not scrambled!");
std::pair<std::map<eServiceReferenceDVB, ePtr<eDVBService> >::iterator, bool> i =
m_new_services.insert(std::pair<eServiceReferenceDVB, ePtr<eDVBService> >(ref, service));
if (i.second)
{
m_last_service = i.first;
m_event(evtNewService);
}
}
else
SCAN_eDebug("dont add... is scrambled!");
m_pmts_to_read.erase(m_pmt_in_progress++);
}
if (!m_chid_current)
eWarning("SCAN: the current channel's ID was not corrected - not adding channel.");
else
{
addKnownGoodChannel(m_chid_current, m_ch_current);
if (m_chid_current)
{
switch(type)
{
case iDVBFrontend::feSatellite:
case iDVBFrontend::feTerrestrial:
case iDVBFrontend::feCable:
{
ePtr<iDVBFrontend> fe;
if (!m_channel->getFrontend(fe))
{
ePyObject tp_dict = PyDict_New();
fe->getTransponderData(tp_dict, false);
// eDebug("add tuner data for tsid %04x, onid %04x, ns %08x",
// m_chid_current.transport_stream_id.get(), m_chid_current.original_network_id.get(),
// m_chid_current.dvbnamespace.get());
m_tuner_data.insert(std::pair<eDVBChannelID, ePyObjectWrapper>(m_chid_current, tp_dict));
Py_DECREF(tp_dict);
}
}
default:
break;
}
}
}
m_ch_scanned.push_back(m_ch_current);
for (std::list<ePtr<iDVBFrontendParameters> >::iterator i(m_ch_toScan.begin()); i != m_ch_toScan.end();)
{
if (sameChannel(*i, m_ch_current))
{
SCAN_eDebug("remove dupe 2");
m_ch_toScan.erase(i++);
continue;
}
++i;
}
nextChannel();
}
void eDVBScan::start(const eSmartPtrList<iDVBFrontendParameters> &known_transponders, int flags, int networkid)
{
m_flags = flags;
m_networkid = networkid;
m_ch_toScan.clear();
m_ch_scanned.clear();
m_ch_unavailable.clear();
m_new_channels.clear();
m_tuner_data.clear();
m_new_services.clear();
m_last_service = m_new_services.end();
for (eSmartPtrList<iDVBFrontendParameters>::const_iterator i(known_transponders.begin()); i != known_transponders.end(); ++i)
{
bool exist=false;
for (std::list<ePtr<iDVBFrontendParameters> >::const_iterator ii(m_ch_toScan.begin()); ii != m_ch_toScan.end(); ++ii)
{
if (sameChannel(*i, *ii, true))
{
exist=true;
break;
}
}
if (!exist)
m_ch_toScan.push_back(*i);
}
nextChannel();
}
void eDVBScan::insertInto(iDVBChannelList *db, bool backgroundscanresult)
{
if (m_flags & scanRemoveServices)
{
bool clearTerrestrial=false;
bool clearCable=false;
std::set<unsigned int> scanned_sat_positions;
std::list<ePtr<iDVBFrontendParameters> >::iterator it(m_ch_scanned.begin());
for (;it != m_ch_scanned.end(); ++it)
{
if (m_flags & scanDontRemoveUnscanned)
db->removeServices(&(*(*it)));
else
{
int system;
(*it)->getSystem(system);
switch(system)
{
case iDVBFrontend::feSatellite:
{
eDVBFrontendParametersSatellite sat_parm;
(*it)->getDVBS(sat_parm);
scanned_sat_positions.insert(sat_parm.orbital_position);
break;
}
case iDVBFrontend::feTerrestrial:
{
clearTerrestrial=true;
break;
}
case iDVBFrontend::feCable:
{
clearCable=true;
break;
}
}
}
}
for (it=m_ch_unavailable.begin();it != m_ch_unavailable.end(); ++it)
{
if (m_flags & scanDontRemoveUnscanned)
db->removeServices(&(*(*it)));
else
{
int system;
(*it)->getSystem(system);
switch(system)
{
case iDVBFrontend::feSatellite:
{
eDVBFrontendParametersSatellite sat_parm;
(*it)->getDVBS(sat_parm);
scanned_sat_positions.insert(sat_parm.orbital_position);
break;
}
case iDVBFrontend::feTerrestrial:
{
clearTerrestrial=true;
break;
}
case iDVBFrontend::feCable:
{
clearCable=true;
break;
}
}
}
}
if (clearTerrestrial)
{
eDVBChannelID chid;
chid.dvbnamespace=0xEEEE0000;
db->removeServices(chid);
}
if (clearCable)
{
eDVBChannelID chid;
chid.dvbnamespace=0xFFFF0000;
db->removeServices(chid);
}
for (std::set<unsigned int>::iterator x(scanned_sat_positions.begin()); x != scanned_sat_positions.end(); ++x)
{
eDVBChannelID chid;
if (m_flags & scanDontRemoveFeeds)
chid.dvbnamespace = eDVBNamespace((*x)<<16);
// eDebug("remove %d %08x", *x, chid.dvbnamespace.get());
db->removeServices(chid, *x);
}
}
for (std::map<eDVBChannelID, ePtr<iDVBFrontendParameters> >::const_iterator
ch(m_new_channels.begin()); ch != m_new_channels.end(); ++ch)
{
int system;
ch->second->getSystem(system);
std::map<eDVBChannelID, ePyObjectWrapper>::iterator it = m_tuner_data.find(ch->first);
switch(system)
{
case iDVBFrontend::feTerrestrial:
{
eDVBFrontendParameters *p = (eDVBFrontendParameters*)&(*ch->second);
eDVBFrontendParametersTerrestrial parm;
int freq = PyInt_AsLong(PyDict_GetItemString(it->second, "frequency"));
p->getDVBT(parm);
// eDebug("corrected freq for tsid %04x, onid %04x, ns %08x is %d, old was %d",
// ch->first.transport_stream_id.get(), ch->first.original_network_id.get(),
// ch->first.dvbnamespace.get(), freq, parm.frequency);
parm.frequency = freq;
p->setDVBT(parm);
break;
}
case iDVBFrontend::feSatellite: // no update of any transponder parameter yet
case iDVBFrontend::feCable:
break;
}
if (m_flags & scanOnlyFree)
{
eDVBFrontendParameters *ptr = (eDVBFrontendParameters*)&(*ch->second);
ptr->setFlags(iDVBFrontendParameters::flagOnlyFree);
}
db->addChannelToList(ch->first, ch->second);
}
for (std::map<eServiceReferenceDVB, ePtr<eDVBService> >::const_iterator
service(m_new_services.begin()); service != m_new_services.end(); ++service)
{
ePtr<eDVBService> dvb_service;
if (!db->getService(service->first, dvb_service))
{
if (dvb_service->m_flags & eDVBService::dxNoSDT)
continue;
if (!(dvb_service->m_flags & eDVBService::dxHoldName))
{
dvb_service->m_service_name = service->second->m_service_name;
dvb_service->m_service_name_sort = service->second->m_service_name_sort;
}
dvb_service->m_provider_name = service->second->m_provider_name;
if (service->second->m_ca.size())
dvb_service->m_ca = service->second->m_ca;
if (!backgroundscanresult) // do not remove new found flags when this is the result of a 'background scan'
dvb_service->m_flags &= ~eDVBService::dxNewFound;
}
else
{
db->addService(service->first, service->second);
if (!(m_flags & scanRemoveServices))
service->second->m_flags |= eDVBService::dxNewFound;
}
}
if (!backgroundscanresult)
{
/* only create a 'Last Scanned' bouquet when this is not the result of a background scan */
std::string bouquetname = "userbouquet.LastScanned.tv";
std::string bouquetquery = "FROM BOUQUET \"" + bouquetname + "\" ORDER BY bouquet";
eServiceReference bouquetref(eServiceReference::idDVB, eServiceReference::flagDirectory, bouquetquery);
bouquetref.setData(0, 1); /* set bouquet 'servicetype' to tv (even though we probably have both tv and radio channels) */
eBouquet *bouquet = NULL;
eServiceReference rootref(eServiceReference::idDVB, eServiceReference::flagDirectory, "FROM BOUQUET \"bouquets.tv\" ORDER BY bouquet");
if (!db->getBouquet(bouquetref, bouquet) && bouquet)
{
/* bouquet already exists, empty it before we continue */
bouquet->m_services.clear();
}
else
{
/* bouquet doesn't yet exist, create a new one */
if (!db->getBouquet(rootref, bouquet) && bouquet)
{
bouquet->m_services.push_back(bouquetref);
bouquet->flushChanges();
}
/* loading the bouquet seems to be the only way to add it to the bouquet list */
eDVBDB *dvbdb = eDVBDB::getInstance();
if (dvbdb) dvbdb->loadBouquet(bouquetname.c_str());
/* and now that it has been added to the list, we can find it */
db->getBouquet(bouquetref, bouquet);
}
if (bouquet)
{
bouquet->m_bouquet_name = "Last Scanned";
for (std::map<eServiceReferenceDVB, ePtr<eDVBService> >::const_iterator
service(m_new_services.begin()); service != m_new_services.end(); ++service)
{
bouquet->m_services.push_back(service->first);
}
bouquet->flushChanges();
}
else
{
eDebug("failed to create 'Last Scanned' bouquet!");
}
}
}
RESULT eDVBScan::processSDT(eDVBNamespace dvbnamespace, const ServiceDescriptionSection &sdt)
{
const ServiceDescriptionList &services = *sdt.getDescriptions();
SCAN_eDebug("ONID: %04x", sdt.getOriginalNetworkId());
eDVBChannelID chid(dvbnamespace, sdt.getTransportStreamId(), sdt.getOriginalNetworkId());
/* save correct CHID for this channel */
m_chid_current = chid;
for (ServiceDescriptionConstIterator s(services.begin()); s != services.end(); ++s)
{
unsigned short service_id = (*s)->getServiceId();
SCAN_eDebugNoNewLine("SID %04x: ", service_id);
bool add = true;
if (m_flags & scanOnlyFree)
{
std::map<unsigned short, service>::iterator it =
m_pmts_to_read.find(service_id);
if (it != m_pmts_to_read.end())
{
if (it->second.scrambled)
{
SCAN_eDebug("is scrambled!");
add = false;
}
else
SCAN_eDebug("is free");
}
else {
SCAN_eDebug("not found in PAT.. so we assume it is scrambled!!");
add = false;
}
}
if (add)
{
eServiceReferenceDVB ref;
ePtr<eDVBService> service = new eDVBService;
ref.set(chid);
ref.setServiceID(service_id);
for (DescriptorConstIterator desc = (*s)->getDescriptors()->begin();
desc != (*s)->getDescriptors()->end(); ++desc)
{
switch ((*desc)->getTag())
{
case SERVICE_DESCRIPTOR:
{
ServiceDescriptor &d = (ServiceDescriptor&)**desc;
int servicetype = d.getServiceType();
/* NA scanning hack */
switch (servicetype)
{
/* DISH/BEV servicetypes: */
case 128:
case 133:
case 137:
case 144:
case 145:
case 150:
case 154:
case 163:
case 164:
case 166:
case 167:
case 168:
servicetype = 1;
break;
}
/* */
ref.setServiceType(servicetype);
service->m_service_name = convertDVBUTF8(d.getServiceName());
service->genSortName();
service->m_provider_name = convertDVBUTF8(d.getServiceProviderName());
SCAN_eDebug("name '%s', provider_name '%s'", service->m_service_name.c_str(), service->m_provider_name.c_str());
break;
}
case CA_IDENTIFIER_DESCRIPTOR:
{
CaIdentifierDescriptor &d = (CaIdentifierDescriptor&)**desc;
const CaSystemIdList &caids = *d.getCaSystemIds();
SCAN_eDebugNoNewLine("CA ");
for (CaSystemIdList::const_iterator i(caids.begin()); i != caids.end(); ++i)
{
SCAN_eDebugNoNewLine("%04x ", *i);
service->m_ca.push_front(*i);
}
SCAN_eDebug("");
break;
}
default:
SCAN_eDebug("descr<%x>", (*desc)->getTag());
break;
}
}
std::pair<std::map<eServiceReferenceDVB, ePtr<eDVBService> >::iterator, bool> i =
m_new_services.insert(std::pair<eServiceReferenceDVB, ePtr<eDVBService> >(ref, service));
if (i.second)
{
m_last_service = i.first;
m_event(evtNewService);
}
}
if (m_pmt_running && m_pmt_in_progress->first == service_id)
m_abort_current_pmt = true;
else
m_pmts_to_read.erase(service_id);
}
return 0;
}
RESULT eDVBScan::connectEvent(const Slot1<void,int> &event, ePtr<eConnection> &connection)
{
connection = new eConnection(this, m_event.connect(event));
return 0;
}
void eDVBScan::getStats(int &transponders_done, int &transponders_total, int &services)
{
transponders_done = m_ch_scanned.size() + m_ch_unavailable.size();
transponders_total = m_ch_toScan.size() + transponders_done;
services = m_new_services.size();
}
void eDVBScan::getLastServiceName(std::string &last_service_name)
{
if (m_last_service == m_new_services.end())
last_service_name = "";
else
last_service_name = m_last_service->second->m_service_name;
}
RESULT eDVBScan::getFrontend(ePtr<iDVBFrontend> &fe)
{
if (m_channel)
return m_channel->getFrontend(fe);
fe = 0;
return -1;
}
RESULT eDVBScan::getCurrentTransponder(ePtr<iDVBFrontendParameters> &tp)
{
if (m_ch_current)
{
tp = m_ch_current;
return 0;
}
tp = 0;
return -1;
}
| gpl-2.0 |
heptalium/rpi-sources-3.16 | kernel/sched/deadline.c | 43070 | /*
* Deadline Scheduling Class (SCHED_DEADLINE)
*
* Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
*
* Tasks that periodically execute their instances for less than their
* runtime won't miss any of their deadlines.
* Tasks that are not periodic or sporadic, or that try to execute more
* than their reserved bandwidth, will be slowed down (and may potentially
* miss some of their deadlines), and won't affect any other task.
*
* Copyright (C) 2012 Dario Faggioli <[email protected]>,
* Juri Lelli <[email protected]>,
* Michael Trimarchi <[email protected]>,
* Fabio Checconi <[email protected]>
*/
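/*
 * Illustrative userspace sketch (not part of this file; numbers are made up,
 * and userspace usually has to define struct sched_attr itself as shown in
 * Documentation/scheduler/sched-deadline.txt): a task is admitted to
 * SCHED_DEADLINE via sched_setattr(), with runtime, deadline and period
 * given in nanoseconds:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms budget
 *		.sched_deadline	= 100 * 1000 * 1000,	// 100ms relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms period
 *	};
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */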
#include "sched.h"
#include <linux/slab.h>
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
return container_of(dl_se, struct task_struct, dl);
}
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
return container_of(dl_rq, struct rq, dl);
}
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq = task_rq(p);
return &rq->dl;
}
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
struct sched_dl_entity *dl_se = &p->dl;
return dl_rq->rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
raw_spin_lock_init(&dl_b->dl_runtime_lock);
dl_b->dl_period = period;
dl_b->dl_runtime = runtime;
}
void init_dl_bw(struct dl_bw *dl_b)
{
raw_spin_lock_init(&dl_b->lock);
raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
if (global_rt_runtime() == RUNTIME_INF)
dl_b->bw = -1;
else
dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
dl_b->total_bw = 0;
}
void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
dl_rq->rb_root = RB_ROOT;
#ifdef CONFIG_SMP
/* zero means no -deadline tasks */
dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
init_dl_bw(&dl_rq->dl_bw);
#endif
}
#ifdef CONFIG_SMP
static inline int dl_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->dlo_count);
}
static inline void dl_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
/*
* Must be visible before the overload count is
* set (as in sched_rt.c).
*
* Matched by the barrier in pull_dl_task().
*/
smp_wmb();
atomic_inc(&rq->rd->dlo_count);
}
static inline void dl_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
atomic_dec(&rq->rd->dlo_count);
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}
static void update_dl_migration(struct dl_rq *dl_rq)
{
if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
}
} else if (dl_rq->overloaded) {
dl_clear_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 0;
}
}
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
update_dl_migration(dl_rq);
}
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
update_dl_migration(dl_rq);
}
/*
* The list of pushable -deadline tasks is not a plist, like in
* sched_rt.c, it is an rb-tree with tasks ordered by deadline.
*/
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
struct rb_node *parent = NULL;
struct task_struct *entry;
int leftmost = 1;
BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
while (*link) {
parent = *link;
entry = rb_entry(parent, struct task_struct,
pushable_dl_tasks);
if (dl_entity_preempt(&p->dl, &entry->dl))
link = &parent->rb_left;
else {
link = &parent->rb_right;
leftmost = 0;
}
}
if (leftmost)
dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
rb_link_node(&p->pushable_dl_tasks, parent, link);
rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
struct dl_rq *dl_rq = &rq->dl;
if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
return;
if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
struct rb_node *next_node;
next_node = rb_next(&p->pushable_dl_tasks);
dl_rq->pushable_dl_tasks_leftmost = next_node;
}
rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
}
static inline int has_pushable_dl_tasks(struct rq *rq)
{
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}
static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return dl_task(prev);
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);
static inline void queue_push_tasks(struct rq *rq)
{
if (!has_pushable_dl_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
static inline void queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
#else
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
static inline void pull_dl_task(struct rq *rq)
{
}
static inline void queue_push_tasks(struct rq *rq)
{
}
static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags);
/*
* We are being explicitly informed that a new instance is starting,
* and this means that:
* - the absolute deadline of the entity has to be placed at
* current time + relative deadline;
* - the runtime of the entity has to be set to the maximum value.
*
* The capability of specifying such an event is useful whenever a -deadline
* entity wants to (try to!) synchronize its behaviour with the scheduler's,
* and to (try to!) reconcile itself with its own scheduling
* parameters.
*/
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
/*
* We use the regular wall clock time to set deadlines in the
* future; in fact, we must consider execution overheads (time
* spent on hardirq context, etc.).
*/
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
dl_se->dl_new = 0;
}
/*
* Pure Earliest Deadline First (EDF) scheduling does not deal with the
* possibility of an entity lasting more than what it declared, and thus
* exhausting its runtime.
*
* Here we are interested in making runtime overrun possible, but we do
* not want an entity which is misbehaving to affect the scheduling of all
* other entities.
* Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
* is used, in order to confine each entity within its own bandwidth.
*
* This function deals exactly with that, and ensures that when the runtime
* of an entity is replenished, its deadline is also postponed. That ensures
* the overrunning entity can't interfere with other entities in the system and
* can't make them miss their deadlines. Reasons why this kind of overrun
* could happen are, typically, an entity voluntarily trying to exceed its
* runtime, or its runtime being underestimated during sched_setscheduler_ex().
*/
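/*
 * Worked example (illustrative numbers only): with dl_runtime = 10ms,
 * dl_period = 100ms and a residual runtime of -25ms after an overrun,
 * the replenishment loop below postpones the deadline by three periods
 * while adding 3 * 10ms of budget, leaving runtime = +5ms and keeping
 * the long-run bandwidth at 10ms every 100ms.
 */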
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
BUG_ON(pi_se->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0) {
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
/*
* We keep moving the deadline away until we get some
* available runtime for the entity. This ensures correct
* handling of situations where the runtime overrun is
* arbitrary large.
*/
while (dl_se->runtime <= 0) {
dl_se->deadline += pi_se->dl_period;
dl_se->runtime += pi_se->dl_runtime;
}
/*
* At this point, the deadline really should be "in
* the future" with respect to rq->clock. If it's
* not, we are, for some reason, lagging too much!
* Anyway, after having warned userspace about that,
* we still try to keep things running by
* resetting the deadline and the budget of the
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged to much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
}
/*
* Here we check if --at time t-- an entity (which is probably being
* [re]activated or, in general, enqueued) can use its remaining runtime
* and its current deadline _without_ exceeding the bandwidth it is
* assigned (function returns true if it can't). We are in fact applying
* one of the CBS rules: when a task wakes up, if the residual runtime
* over residual deadline fits within the allocated bandwidth, then we
* can keep the current (absolute) deadline and residual budget without
* disrupting the schedulability of the system. Otherwise, we should
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
* Documentation/scheduler/sched-deadline.txt for more information).
*
* This function returns true if:
*
* runtime / (deadline - t) > dl_runtime / dl_period ,
*
* IOW we can't recycle current parameters.
*
* Notice that the bandwidth check is done against the period. For a
* task with deadline equal to its period this is the same as using
* dl_deadline instead of dl_period in the equation above.
*/
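/*
 * Worked example (illustrative numbers only): a task with dl_runtime = 10ms
 * and dl_period = 100ms wakes up with 4ms of residual runtime and 30ms left
 * to its old absolute deadline. Since 4ms / 30ms > 10ms / 100ms (in the
 * cross-multiplied form below: 100 * 4 > 30 * 10), keeping the old pair
 * would exceed the reserved bandwidth, so this returns true and the caller
 * refills the runtime and moves the deadline a full relative deadline ahead.
 */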
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, u64 t)
{
u64 left, right;
/*
* left and right are the two sides of the equation above,
* after a bit of shuffling to use multiplications instead
* of divisions.
*
* Note that none of the time values involved in the two
* multiplications are absolute: dl_deadline and dl_runtime
* are the relative deadline and the maximum runtime of each
* instance, runtime is the runtime left for the last instance
* and (deadline - t), since t is rq->clock, is the time left
* to the (absolute) deadline. Even if overflowing the u64 type
* is very unlikely to occur in both cases, here we scale down
* as we want to avoid that risk at all. Scaling down by 10
* means that we reduce granularity to 1us. We are fine with it,
* since this is only a true/false check and, anyway, thinking
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_se->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
/*
* When a -deadline entity is queued back on the runqueue, its runtime and
* deadline might need updating.
*
* The policy here is that we update the deadline of the entity only if:
* - the current deadline is in the past,
* - using the remaining runtime with the current deadline would make
* the entity exceed its bandwidth.
*/
static void update_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* The arrival of a new instance needs special treatment, i.e.,
* the actual scheduling parameters have to be "renewed".
*/
if (dl_se->dl_new) {
setup_new_dl_entity(dl_se, pi_se);
return;
}
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
}
/*
* If the entity depleted all its runtime, and if we want it to sleep
* while waiting for some new execution time to become available, we
* set the bandwidth enforcement timer to the replenishment instant
* and try to activate it.
*
* Notice that it is important for the caller to know if the timer
* actually started or not (i.e., the replenishment instant is in
* the future or in the past).
*/
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
ktime_t now, act;
ktime_t soft, hard;
unsigned long range;
s64 delta;
if (boosted)
return 0;
/*
* We want the timer to fire at the deadline, but considering
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
*/
act = ns_to_ktime(dl_se->deadline);
now = hrtimer_cb_get_time(&dl_se->dl_timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
/*
* If the expiry time already passed, e.g., because the value
* chosen as the deadline is too small, don't even try to
* start the timer in the past!
*/
if (ktime_us_delta(act, now) < 0)
return 0;
hrtimer_set_expires(&dl_se->dl_timer, act);
soft = hrtimer_get_softexpires(&dl_se->dl_timer);
hard = hrtimer_get_expires(&dl_se->dl_timer);
range = ktime_to_ns(ktime_sub(hard, soft));
__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
range, HRTIMER_MODE_ABS, 0);
return hrtimer_active(&dl_se->dl_timer);
}
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
* means the task is throttled and needs a runtime replenishment.
*
* However, what we actually do depends on whether the task is active
* (it is on its rq) or has been removed from there by a call to
* dequeue_task_dl(). In the former case we must issue the runtime
* replenishment and add the task back to the dl_rq; in the latter, we just
* do nothing but clearing dl_throttled, so that runtime and deadline
* updating (and the queueing back to dl_rq) will be done by the
* next call to enqueue_task_dl().
*/
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
struct sched_dl_entity *dl_se = container_of(timer,
struct sched_dl_entity,
dl_timer);
struct task_struct *p = dl_task_of(dl_se);
struct rq *rq;
again:
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (rq != task_rq(p)) {
/* Task was moved, retrying. */
raw_spin_unlock(&rq->lock);
goto again;
}
/*
* We need to take care of possible races here. In fact, the
* task might have changed its scheduling policy to something
* different from SCHED_DEADLINE or changed its reservation
* parameters (through sched_setattr()).
*/
if (!dl_task(p) || dl_se->dl_new)
goto unlock;
sched_clock_tick();
update_rq_clock(rq);
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
if (p->on_rq) {
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (task_has_dl_policy(rq->curr))
check_preempt_curr_dl(rq, p, 0);
else
resched_task(rq->curr);
#ifdef CONFIG_SMP
/*
* Queueing this task back might have overloaded rq,
* check if we need to kick someone away.
*/
if (has_pushable_dl_tasks(rq))
push_dl_task(rq);
#endif
}
unlock:
raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
if (hrtimer_active(timer)) {
hrtimer_try_to_cancel(timer);
return;
}
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
}
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
return (dl_se->runtime <= 0);
}
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
/*
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
*/
static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
u64 delta_exec;
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
/*
* Consumed budget is computed considering the time as
* observed by schedulable tasks (excluding time spent
* in hardirq context, etc.). Deadlines are instead
* computed using hard walltime. This seems to be the more
* natural solution, but the full ramifications of this
* approach need further study.
*/
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq_clock_task(rq);
cpuacct_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
dl_se->runtime -= delta_exec;
if (dl_runtime_exceeded(rq, dl_se)) {
__dequeue_task_dl(rq, curr, 0);
if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
dl_se->dl_throttled = 1;
else
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
resched_task(curr);
}
/*
* Because -- for now -- we share the rt bandwidth, we need to
* account our runtime there too, otherwise actual rt tasks
* would be able to exceed the shared quota.
*
* Account to the root rt group for now.
*
* The solution we're working towards is having the RT groups scheduled
* using deadline servers -- however there's a few nasties to figure
* out before that can happen.
*/
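/*
 * Illustrative note (default values, tunable via sysctl): with
 * kernel.sched_rt_period_us = 1000000 and kernel.sched_rt_runtime_us = 950000,
 * -deadline and RT tasks together get at most 95% of each second, and the
 * accounting below is what makes -deadline runtime count against that quota.
 */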
if (rt_bandwidth_enabled()) {
struct rt_rq *rt_rq = &rq->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We'll let actual RT tasks worry about the overflow here, we
* have our own CBS to keep us inline; only account when RT
* bandwidth is relevant.
*/
if (sched_rt_bandwidth_account(rt_rq))
rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
#ifdef CONFIG_SMP
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
static inline u64 next_deadline(struct rq *rq)
{
struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
if (next && dl_prio(next->prio))
return next->dl.deadline;
else
return 0;
}
static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_rq->earliest_dl.curr == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
/*
* If the dl_rq had no -deadline tasks, or if the new task
* has shorter deadline than the current one on dl_rq, we
* know that the previous earliest becomes our next earliest,
* as the new task becomes the earliest itself.
*/
dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
dl_rq->earliest_dl.curr = deadline;
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
} else if (dl_rq->earliest_dl.next == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.next)) {
/*
* On the other hand, if the new -deadline task has a
* later deadline than the earliest one on dl_rq, but
* it is earlier than the next (if any), we must
* recompute the next-earliest.
*/
dl_rq->earliest_dl.next = next_deadline(rq);
}
}
static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* Since we may have removed our earliest (and/or next earliest)
* task we must recompute them.
*/
if (!dl_rq->dl_nr_running) {
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
} else {
struct rb_node *leftmost = dl_rq->rb_leftmost;
struct sched_dl_entity *entry;
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
dl_rq->earliest_dl.curr = entry->deadline;
dl_rq->earliest_dl.next = next_deadline(rq);
cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
}
}
#else
static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
u64 deadline = dl_se->deadline;
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
}
static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
int prio = dl_task_of(dl_se)->prio;
WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
}
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rb_node **link = &dl_rq->rb_root.rb_node;
struct rb_node *parent = NULL;
struct sched_dl_entity *entry;
int leftmost = 1;
BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
while (*link) {
parent = *link;
entry = rb_entry(parent, struct sched_dl_entity, rb_node);
if (dl_time_before(dl_se->deadline, entry->deadline))
link = &parent->rb_left;
else {
link = &parent->rb_right;
leftmost = 0;
}
}
if (leftmost)
dl_rq->rb_leftmost = &dl_se->rb_node;
rb_link_node(&dl_se->rb_node, parent, link);
rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
inc_dl_tasks(dl_se, dl_rq);
}
static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
if (RB_EMPTY_NODE(&dl_se->rb_node))
return;
if (dl_rq->rb_leftmost == &dl_se->rb_node) {
struct rb_node *next_node;
next_node = rb_next(&dl_se->rb_node);
dl_rq->rb_leftmost = next_node;
}
rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
RB_CLEAR_NODE(&dl_se->rb_node);
dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, int flags)
{
BUG_ON(on_dl_rq(dl_se));
/*
* If this is a wakeup or a new instance, the scheduling
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se);
else if (flags & ENQUEUE_REPLENISH)
replenish_dl_entity(dl_se, pi_se);
__enqueue_dl_entity(dl_se);
}
static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
__dequeue_dl_entity(dl_se);
}
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
struct sched_dl_entity *pi_se = &p->dl;
/*
* Use the scheduling parameters of the top pi-waiter
* task if we have one and its (relative) deadline is
* smaller than our one... OTW we keep our runtime and
* deadline.
*/
if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
pi_se = &pi_task->dl;
/*
* If p is throttled, we do nothing. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
* its rq, the bandwidth timer callback (which clearly has not
* run yet) will take care of this.
*/
if (p->dl.dl_throttled)
return;
enqueue_dl_entity(&p->dl, pi_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
dequeue_dl_entity(&p->dl);
dequeue_pushable_dl_task(rq, p);
}
static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
}
/*
* Yield task semantic for -deadline tasks is:
*
* get off the CPU until our next instance, with
* a new runtime. This is of little use now, since we
* don't have a bandwidth reclaiming mechanism. Anyway,
* bandwidth reclaiming is planned for the future, and
* yield_task_dl will indicate that some spare budget
* is available for other task instances to use it.
*/
static void yield_task_dl(struct rq *rq)
{
struct task_struct *p = rq->curr;
/*
* We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it
* new scheduling parameters (thanks to dl_yielded=1).
*/
if (p->dl.runtime > 0) {
rq->curr->dl.dl_yielded = 1;
p->dl.runtime = 0;
}
update_curr_dl(rq);
}
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = ACCESS_ONCE(rq->curr); /* unlocked access */
/*
* If we are dealing with a -deadline task, we must
* decide where to wake it up.
* If it has a later deadline and the current task
* on this rq can't move (provided the waking task
* can!) we prefer to send it somewhere else. On the
* other hand, if it has a shorter deadline, we
* try to make it stay here, it might be important.
*/
if (unlikely(dl_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
!dl_entity_preempt(&p->dl, &curr->dl)) &&
(p->nr_cpus_allowed > 1)) {
int target = find_later_rq(p);
if (target != -1)
cpu = target;
}
rcu_read_unlock();
out:
return cpu;
}
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
return;
resched_task(rq->curr);
}
#endif /* CONFIG_SMP */
/*
* Only called when both the current and waking task are -deadline
* tasks.
*/
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
int flags)
{
if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
resched_task(rq->curr);
return;
}
#ifdef CONFIG_SMP
/*
* In the unlikely case current and p have the same deadline
* let us try to decide what's the best thing to do...
*/
if ((p->dl.deadline == rq->curr->dl.deadline) &&
!test_tsk_need_resched(rq->curr))
check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
s64 delta = p->dl.dl_runtime - p->dl.runtime;
if (delta > 10000)
hrtick_start(rq, p->dl.runtime);
}
#endif
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
struct dl_rq *dl_rq)
{
struct rb_node *left = dl_rq->rb_leftmost;
if (!left)
return NULL;
return rb_entry(left, struct sched_dl_entity, rb_node);
}
struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
struct sched_dl_entity *dl_se;
struct task_struct *p;
struct dl_rq *dl_rq;
dl_rq = &rq->dl;
if (need_pull_dl_task(rq, prev)) {
pull_dl_task(rq);
/*
* pull_dl_task() can drop (and re-acquire) rq->lock; this
* means a stop task can slip in, in which case we need to
* re-start task selection.
*/
if (rq->stop && rq->stop->on_rq)
return RETRY_TASK;
}
/*
* When prev is DL, we may throttle it in put_prev_task().
* So, we update time before we check for dl_nr_running.
*/
if (prev->sched_class == &dl_sched_class)
update_curr_dl(rq);
if (unlikely(!dl_rq->dl_nr_running))
return NULL;
put_prev_task(rq, prev);
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
p->se.exec_start = rq_clock_task(rq);
/* Running task will never be pushed. */
dequeue_pushable_dl_task(rq, p);
#ifdef CONFIG_SCHED_HRTICK
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
#endif
queue_push_tasks(rq);
return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
#ifdef CONFIG_SCHED_HRTICK
if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
start_hrtick_dl(rq, p);
#endif
}
static void task_fork_dl(struct task_struct *p)
{
/*
* SCHED_DEADLINE tasks cannot fork and this is achieved through
* sched_fork()
*/
}
static void task_dead_dl(struct task_struct *p)
{
struct hrtimer *timer = &p->dl.dl_timer;
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
/*
* Since we are TASK_DEAD we won't slip out of the domain!
*/
raw_spin_lock_irq(&dl_b->lock);
dl_b->total_bw -= p->dl.dl_bw;
raw_spin_unlock_irq(&dl_b->lock);
hrtimer_cancel(timer);
}
static void set_curr_task_dl(struct rq *rq)
{
struct task_struct *p = rq->curr;
p->se.exec_start = rq_clock_task(rq);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define DL_MAX_TRIES 3
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
(cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
(p->nr_cpus_allowed > 1))
return 1;
return 0;
}
/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
struct rb_node *next_node = rq->dl.rb_leftmost;
struct sched_dl_entity *dl_se;
struct task_struct *p = NULL;
next_node:
next_node = rb_next(next_node);
if (next_node) {
dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
p = dl_task_of(dl_se);
if (pick_dl_task(rq, p, cpu))
return p;
goto next_node;
}
return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
int this_cpu = smp_processor_id();
int best_cpu, cpu = task_cpu(task);
/* Make sure the mask is initialized first */
if (unlikely(!later_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1;
best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
task, later_mask);
if (best_cpu == -1)
return -1;
/*
* If we are here, some target has been found,
* the most suitable of which is cached in best_cpu.
* That is, among the runqueues whose current tasks have
* later deadlines than our task's, it is the rq with the
* latest possible one.
*
* Now we check how well this matches with task's
* affinity and system topology.
*
* The last cpu where the task ran is our first
* guess, since it is most likely cache-hot there.
*/
if (cpumask_test_cpu(cpu, later_mask))
return cpu;
/*
* Check if this_cpu is to be skipped (i.e., it is
* not in the mask) or not.
*/
if (!cpumask_test_cpu(this_cpu, later_mask))
this_cpu = -1;
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
/*
* If possible, preempting this_cpu is
* cheaper than migrating.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
/*
* Last chance: if best_cpu is valid and is
* in the mask, that becomes our choice.
*/
if (best_cpu < nr_cpu_ids &&
cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* At this point, all our guesses failed, we just return
* 'something', and let the caller sort the things out.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any(later_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
struct rq *later_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < DL_MAX_TRIES; tries++) {
cpu = find_later_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
later_rq = cpu_rq(cpu);
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu,
&task->cpus_allowed) ||
task_running(rq, task) || !task->on_rq)) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
}
}
/*
* If the rq we found has no -deadline task, or
* its earliest one has a later deadline than our
* task, the rq is a good one.
*/
if (!later_rq->dl.dl_nr_running ||
dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr))
break;
/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
later_rq = NULL;
}
return later_rq;
}
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_dl_tasks(rq))
return NULL;
p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
struct task_struct, pushable_dl_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!p->on_rq);
BUG_ON(!dl_task(p));
return p;
}
/*
* See if the non running -deadline tasks on this rq
* can be sent to some other CPU where they can preempt
* and start executing.
*/
static int push_dl_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *later_rq;
if (!rq->dl.overloaded)
return 0;
next_task = pick_next_pushable_dl_task(rq);
if (!next_task)
return 0;
retry:
if (unlikely(next_task == rq->curr)) {
WARN_ON(1);
return 0;
}
/*
* If next_task preempts rq->curr, and rq->curr
* can move away, it makes sense to just reschedule
* without going further in pushing next_task.
*/
if (dl_task(rq->curr) &&
dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
rq->curr->nr_cpus_allowed > 1) {
resched_task(rq->curr);
return 0;
}
/* We might release rq lock */
get_task_struct(next_task);
/* Will lock the rq it'll find */
later_rq = find_lock_later_rq(next_task, rq);
if (!later_rq) {
struct task_struct *task;
/*
* We must check all this again, since
* find_lock_later_rq releases rq->lock and it is
* then possible that next_task has migrated.
*/
task = pick_next_pushable_dl_task(rq);
if (task_cpu(next_task) == rq->cpu && task == next_task) {
/*
* The task is still there. We don't try
* again, some other cpu will pull it when ready.
*/
dequeue_pushable_dl_task(rq, next_task);
goto out;
}
if (!task)
/* No more tasks */
goto out;
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, later_rq->cpu);
activate_task(later_rq, next_task, 0);
resched_task(later_rq->curr);
double_unlock_balance(rq, later_rq);
out:
put_task_struct(next_task);
return 1;
}
static void push_dl_tasks(struct rq *rq)
{
/* Terminates as it moves a -deadline task */
while (push_dl_task(rq))
;
}
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
struct task_struct *p;
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
if (likely(!dl_overloaded(this_rq)))
return;
/*
* Match the barrier from dl_set_overloaded; this guarantees that if we
* see overloaded we must also see the dlo_mask bit.
*/
smp_rmb();
for_each_cpu(cpu, this_rq->rd->dlo_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* It looks racy, and it is! However, as in sched_rt.c,
* we are fine with this.
*/
if (this_rq->dl.dl_nr_running &&
dl_time_before(this_rq->dl.earliest_dl.curr,
src_rq->dl.earliest_dl.next))
continue;
/* Might drop this_rq->lock */
double_lock_balance(this_rq, src_rq);
/*
* If there are no more pullable tasks on the
* rq, we're done with it.
*/
if (src_rq->dl.dl_nr_running <= 1)
goto skip;
p = pick_next_earliest_dl_task(src_rq, this_cpu);
/*
* We found a task to be pulled if:
* - it preempts our current (if there's one),
* - it will preempt the last one we pulled (if any).
*/
if (p && dl_time_before(p->dl.deadline, dmin) &&
(!this_rq->dl.dl_nr_running ||
dl_time_before(p->dl.deadline,
this_rq->dl.earliest_dl.curr))) {
WARN_ON(p == src_rq->curr);
WARN_ON(!p->on_rq);
/*
* Then we pull iff p has actually an earlier
* deadline than the current task of its runqueue.
*/
if (dl_time_before(p->dl.deadline,
src_rq->curr->dl.deadline))
goto skip;
resched = true;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
}
skip:
double_unlock_balance(this_rq, src_rq);
}
if (resched)
resched_task(this_rq->curr);
}
/*
* Since the task is not running and a reschedule is not going to happen
* anytime soon on its runqueue, we try pushing it away now.
*/
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
has_pushable_dl_tasks(rq) &&
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
dl_entity_preempt(&rq->curr->dl, &p->dl))) {
push_dl_tasks(rq);
}
}
static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask)
{
struct rq *rq;
int weight;
BUG_ON(!dl_task(p));
/*
* Update only if the task is actually running (i.e.,
* it is on the rq AND it is not throttled).
*/
if (!on_dl_rq(&p->dl))
return;
weight = cpumask_weight(new_mask);
/*
* Only update if the task switches between being able to
* migrate and not being able to migrate.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
return;
rq = task_rq(p);
/*
* The process used to be able to migrate OR it can now migrate
*/
if (weight <= 1) {
if (!task_current(rq, p))
dequeue_pushable_dl_task(rq, p);
BUG_ON(!rq->dl.dl_nr_migratory);
rq->dl.dl_nr_migratory--;
} else {
if (!task_current(rq, p))
enqueue_pushable_dl_task(rq, p);
rq->dl.dl_nr_migratory++;
}
update_dl_migration(&rq->dl);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
if (rq->dl.dl_nr_running > 0)
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}
/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}
void init_sched_dl_class(void)
{
unsigned int i;
for_each_possible_cpu(i)
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
hrtimer_try_to_cancel(&p->dl.dl_timer);
#ifdef CONFIG_SMP
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded cpu, if any.
*/
if (!rq->dl.dl_nr_running)
queue_pull_task(rq);
#endif
}
/*
* When switching to -deadline, we may overload the rq, then
* we try to push someone off, if possible.
*/
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
/*
* If p is throttled, don't consider the possibility
* of preempting rq->curr, the check will be done right
* after its runtime will get replenished.
*/
if (unlikely(p->dl.dl_throttled))
return;
if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->dl.overloaded)
queue_push_tasks(rq);
#else
if (task_has_dl_policy(rq->curr))
check_preempt_curr_dl(rq, p, 0);
#endif /* CONFIG_SMP */
}
}
/*
* If the scheduling parameters of a -deadline task changed,
* a push or pull operation might be needed.
*/
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
* we don't have the old deadline value, and
* we can't argue if the task is increasing
* or lowering its prio, so...
*/
if (!rq->dl.overloaded)
queue_pull_task(rq);
/*
* If we now have an earlier deadline task than p,
* then reschedule, provided p is still on this
* runqueue.
*/
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
resched_task(p);
#else
/*
* Again, we don't know if p has an earlier
* or later deadline, so let's blindly set a
* (maybe not needed) rescheduling point.
*/
resched_task(p);
#endif /* CONFIG_SMP */
} else
switched_to_dl(rq, p);
}
const struct sched_class dl_sched_class = {
.next = &rt_sched_class,
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
.check_preempt_curr = check_preempt_curr_dl,
.pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl,
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
#endif
.set_curr_task = set_curr_task_dl,
.task_tick = task_tick_dl,
.task_fork = task_fork_dl,
.task_dead = task_dead_dl,
.prio_changed = prio_changed_dl,
.switched_from = switched_from_dl,
.switched_to = switched_to_dl,
};
| gpl-2.0 |
embedian/blues-tools | src/bt-network.c | 9028 | /*
*
* bluez-tools - a set of tools to manage bluetooth devices for linux
*
* Copyright (C) 2010-2011 Alexander Orlenko <[email protected]>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <locale.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <gio/gio.h>
#include "lib/dbus-common.h"
#include "lib/helpers.h"
#include "lib/bluez-api.h"
static GMainLoop *mainloop = NULL;
static void sigterm_handler(int sig)
{
g_message("%s received", sig == SIGTERM ? "SIGTERM" : "SIGINT");
g_main_loop_quit(mainloop);
}
static void trap_signals()
{
/* Add SIGTERM && SIGINT handlers */
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = sigterm_handler;
sigaction(SIGTERM, &sa, NULL);
sigaction(SIGINT, &sa, NULL);
}
static void _bt_network_property_changed(GDBusConnection *connection, const gchar *sender_name, const gchar *object_path, const gchar *interface_name, const gchar *signal_name, GVariant *parameters, gpointer user_data)
{
g_assert(user_data != NULL);
GMainLoop *mainloop = user_data;
GVariant *changed_properties = g_variant_get_child_value(parameters, 1);
g_variant_lookup_value(changed_properties, "UUID", NULL);
if (g_variant_lookup_value(changed_properties, "Connected", NULL))
{
if (g_variant_get_boolean(g_variant_lookup_value(changed_properties, "Connected", NULL)) == TRUE)
{
g_print("Network service is connected\n");
}
else
{
g_print("Network service is disconnected\n");
g_main_loop_quit(mainloop);
}
}
else if (g_variant_lookup_value(changed_properties, "Interface", NULL))
{
g_print("Interface: %s\n", g_variant_get_string(g_variant_lookup_value(changed_properties, "Interface", NULL), NULL));
}
else if (g_variant_lookup_value(changed_properties, "UUID", NULL))
{
g_print("UUID: %s (%s)\n", uuid2name(g_variant_get_string(g_variant_lookup_value(changed_properties, "UUID", NULL), NULL)), g_variant_get_string(g_variant_lookup_value(changed_properties, "UUID", NULL), NULL));
}
}
static gchar *adapter_arg = NULL;
static gboolean connect_arg = FALSE;
static gchar *connect_device_arg = NULL;
static gchar *connect_uuid_arg = NULL;
static gboolean server_arg = FALSE;
static gchar *server_uuid_arg = NULL;
static gchar *server_brige_arg = NULL;
static gboolean daemon_arg = FALSE;
static GOptionEntry entries[] = {
{"adapter", 'a', 0, G_OPTION_ARG_STRING, &adapter_arg, "Adapter Name or MAC", "<name|mac>"},
{"connect", 'c', 0, G_OPTION_ARG_NONE, &connect_arg, "Connect to the network device", NULL},
{"server", 's', 0, G_OPTION_ARG_NONE, &server_arg, "Start GN/PANU/NAP server", NULL},
{"daemon", 'd', 0, G_OPTION_ARG_NONE, &daemon_arg, "Run in background (as daemon)"},
{NULL}
};
int main(int argc, char *argv[])
{
GError *error = NULL;
GOptionContext *context;
/* Query current locale */
setlocale(LC_CTYPE, "");
dbus_init();
context = g_option_context_new("- a bluetooth network manager");
g_option_context_add_main_entries(context, entries, NULL);
g_option_context_set_summary(context, "Version "PACKAGE_VERSION);
g_option_context_set_description(context,
"Connect Options:\n"
" -c, --connect <name|mac> <uuid>\n"
" Where\n"
" `name|mac` is a device Name or MAC\n"
" `uuid` is:\n"
" Profile short name: gn, panu or nap\n\n"
"Server Options:\n"
" -s, --server <gn|panu|nap> <bridge>\n"
" Every new connection to this server will be added to the `bridge` interface\n\n"
"Report bugs to <"PACKAGE_BUGREPORT">.\n"
"Project home page <"PACKAGE_URL">."
);
if (!g_option_context_parse(context, &argc, &argv, &error)) {
g_print("%s: %s\n", g_get_prgname(), error->message);
g_print("Try `%s --help` for more information.\n", g_get_prgname());
exit(EXIT_FAILURE);
} else if (!connect_arg && !server_arg) {
g_print("%s", g_option_context_get_help(context, FALSE, NULL));
exit(EXIT_FAILURE);
} else if (connect_arg && (argc != 3 || strlen(argv[1]) == 0 || strlen(argv[2]) == 0)) {
g_print("%s: Invalid arguments for --connect\n", g_get_prgname());
g_print("Try `%s --help` for more information.\n", g_get_prgname());
exit(EXIT_FAILURE);
} else if (server_arg && (argc != 3 || strlen(argv[1]) == 0 || strlen(argv[2]) == 0)) {
g_print("%s: Invalid arguments for --server\n", g_get_prgname());
g_print("Try `%s --help` for more information.\n", g_get_prgname());
exit(EXIT_FAILURE);
}
g_option_context_free(context);
if (!dbus_system_connect(&error)) {
g_printerr("Couldn't connect to DBus system bus: %s\n", error->message);
exit(EXIT_FAILURE);
}
/* Check, that bluetooth daemon is running */
if (!intf_supported(BLUEZ_DBUS_SERVICE_NAME, MANAGER_DBUS_PATH, MANAGER_DBUS_INTERFACE)) {
g_printerr("%s: bluez service is not found\n", g_get_prgname());
g_printerr("Did you forget to run bluetoothd?\n");
exit(EXIT_FAILURE);
}
Adapter *adapter = find_adapter(adapter_arg, &error);
exit_if_error(error);
if (connect_arg) {
connect_device_arg = argv[1];
connect_uuid_arg = argv[2];
Device *device = find_device(adapter, connect_device_arg, &error);
exit_if_error(error);
if (!intf_supported(BLUEZ_DBUS_SERVICE_NAME, device_get_dbus_object_path(device), NETWORK_DBUS_INTERFACE)) {
g_printerr("Network service is not supported by this device\n");
exit(EXIT_FAILURE);
}
mainloop = g_main_loop_new(NULL, FALSE);
Network *network = network_new(device_get_dbus_object_path(device));
guint prop_sig_sub_id = g_dbus_connection_signal_subscribe(system_conn, "org.bluez", "org.freedesktop.DBus.Properties", "PropertiesChanged", network_get_dbus_object_path(network), NULL, G_DBUS_SIGNAL_FLAGS_NONE, _bt_network_property_changed, mainloop, NULL);
if (network_get_connected(network, NULL) == TRUE) {
g_print("Network service is already connected\n");
} else {
gchar *intf = (gchar *) network_connect(network, connect_uuid_arg, &error);
exit_if_error(error);
trap_signals();
g_main_loop_run(mainloop);
/* Force disconnect the network device */
if (network_get_connected(network, NULL) == TRUE) {
network_disconnect(network, NULL);
}
g_free(intf);
}
g_dbus_connection_signal_unsubscribe(system_conn, prop_sig_sub_id);
g_main_loop_unref(mainloop);
g_object_unref(network);
g_object_unref(device);
} else if (server_arg) {
server_uuid_arg = argv[1];
server_brige_arg = argv[2];
if (g_ascii_strcasecmp(server_uuid_arg, "gn") != 0 && g_ascii_strcasecmp(server_uuid_arg, "panu") != 0 && g_ascii_strcasecmp(server_uuid_arg, "nap") != 0) {
g_print("%s: Invalid server UUID: %s\n", g_get_prgname(), server_uuid_arg);
g_print("Try `%s --help` for more information.\n", g_get_prgname());
exit(EXIT_FAILURE);
}
if (!intf_supported(BLUEZ_DBUS_SERVICE_NAME, adapter_get_dbus_object_path(adapter), NETWORK_SERVER_DBUS_INTERFACE)) {
g_printerr("Network server is not supported by this adapter\n");
exit(EXIT_FAILURE);
}
gchar *server_uuid_upper = g_ascii_strup(server_uuid_arg, -1);
NetworkServer *network_server = network_server_new(adapter_get_dbus_object_path(adapter));
network_server_register(network_server, server_uuid_arg, server_brige_arg, &error);
exit_if_error(error);
g_print("%s server registered\n", server_uuid_upper);
mainloop = g_main_loop_new(NULL, FALSE);
if (daemon_arg) {
pid_t pid, sid;
/* Fork off the parent process */
pid = fork();
if (pid < 0)
exit(EXIT_FAILURE);
/* Ok, terminate parent process */
if (pid > 0)
exit(EXIT_SUCCESS);
/* Create a new SID for the child process */
sid = setsid();
if (sid < 0)
exit(EXIT_FAILURE);
/* Close out the standard file descriptors */
close(STDIN_FILENO);
close(STDOUT_FILENO);
close(STDERR_FILENO);
}
trap_signals();
g_main_loop_run(mainloop);
network_server_unregister(network_server, server_uuid_arg, &error);
exit_if_error(error);
g_print("%s server unregistered\n", server_uuid_upper);
g_main_loop_unref(mainloop);
g_free(server_uuid_upper);
g_object_unref(network_server);
}
g_object_unref(adapter);
dbus_disconnect();
exit(EXIT_SUCCESS);
}
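/*
 * Invocation sketch (illustrative; the device address, bridge name and the
 * installed binary name are placeholders, and -a <name|mac> may be added to
 * pick a specific adapter):
 *
 *   bt-network -c 00:11:22:33:44:55 nap      # join the remote device's NAP
 *   bt-network -s nap pan0 -d                # serve NAP, bridge new links to pan0
 *
 * The connect form blocks until the link drops or SIGINT/SIGTERM arrives;
 * the server form registers the service and keeps running, optionally
 * daemonized with -d.
 */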
| gpl-2.0 |
Khira/La-Confrerie | src/game/WaypointMovementGenerator.cpp | 30591 | /*
* Copyright (C) 2005-2010 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
creature_movement Table
alter table creature_movement add `textid1` int(11) NOT NULL default '0';
alter table creature_movement add `textid2` int(11) NOT NULL default '0';
alter table creature_movement add `textid3` int(11) NOT NULL default '0';
alter table creature_movement add `textid4` int(11) NOT NULL default '0';
alter table creature_movement add `textid5` int(11) NOT NULL default '0';
alter table creature_movement add `emote` int(10) unsigned default '0';
alter table creature_movement add `spell` int(5) unsigned default '0';
alter table creature_movement add `wpguid` int(11) default '0';
*/
#include <ctime>
#include "WaypointMovementGenerator.h"
#include "ObjectMgr.h"
#include "Creature.h"
#include "DestinationHolderImp.h"
#include "CreatureAI.h"
#include "WaypointManager.h"
#include "WorldPacket.h"
#include "ScriptCalls.h"
#include <cassert>
//-----------------------------------------------//
void WaypointMovementGenerator<Creature>::LoadPath(Creature &creature)
{
DETAIL_FILTER_LOG(LOG_FILTER_AI_AND_MOVEGENSS, "LoadPath: loading waypoint path for creature %u, %u", creature.GetGUIDLow(), creature.GetDBTableGUIDLow());
i_path = sWaypointMgr.GetPath(creature.GetDBTableGUIDLow());
// We may LoadPath() for several occasions:
// 1: When creature.MovementType=2
// 1a) Path is selected by creature.guid == creature_movement.id
// 1b) Path for 1a) does not exist and then use path from creature.GetEntry() == creature_movement_template.entry
// 2: When creature_template.MovementType=2
// 2a) Creature is summoned and has creature_template.MovementType=2
// Creators need to be sure that creature_movement_template is always valid for summons.
// Mob that can be summoned anywhere should not have creature_movement_template for example.
// No movement found for guid
if (!i_path)
{
i_path = sWaypointMgr.GetPathTemplate(creature.GetEntry());
// No movement found for entry
if (!i_path)
{
sLog.outErrorDb("WaypointMovementGenerator::LoadPath: creature %s (Entry: %u GUID: %u) doesn't have waypoint path",
creature.GetName(), creature.GetEntry(), creature.GetDBTableGUIDLow());
return;
}
}
// We have to set the destination here (for the first point), right after Initialize. Without, we may not have valid xyz for GetResetPosition
CreatureTraveller traveller(creature);
if (creature.CanFly())
creature.AddSplineFlag(SPLINEFLAG_UNKNOWN7);
const WaypointNode &node = i_path->at(i_currentNode);
i_destinationHolder.SetDestination(traveller, node.x, node.y, node.z);
i_nextMoveTime.Reset(i_destinationHolder.GetTotalTravelTime());
}
void WaypointMovementGenerator<Creature>::Initialize(Creature &creature)
{
LoadPath(creature);
creature.addUnitState(UNIT_STAT_ROAMING|UNIT_STAT_ROAMING_MOVE);
}
void WaypointMovementGenerator<Creature>::Finalize(Creature &creature)
{
creature.clearUnitState(UNIT_STAT_ROAMING|UNIT_STAT_ROAMING_MOVE);
}
void WaypointMovementGenerator<Creature>::Interrupt(Creature &creature)
{
creature.clearUnitState(UNIT_STAT_ROAMING|UNIT_STAT_ROAMING_MOVE);
}
void WaypointMovementGenerator<Creature>::Reset(Creature &creature)
{
SetStoppedByPlayer(false);
i_nextMoveTime.Reset(0);
creature.addUnitState(UNIT_STAT_ROAMING|UNIT_STAT_ROAMING_MOVE);
}
bool WaypointMovementGenerator<Creature>::Update(Creature &creature, const uint32 &diff)
{
if (!&creature)
return true;
// Waypoint movement can be switched on/off
// This is quite handy for escort quests and other stuff
if (creature.hasUnitState(UNIT_STAT_NOT_MOVE))
{
creature.clearUnitState(UNIT_STAT_ROAMING_MOVE);
return true;
}
// prevent a crash at empty waypoint path.
if (!i_path || i_path->empty())
{
creature.clearUnitState(UNIT_STAT_ROAMING_MOVE);
return true;
}
if (i_currentNode >= i_path->size())
{
sLog.outError("WaypointMovement currentNode (%u) is equal or bigger than path size (creature entry %u)", i_currentNode, creature.GetEntry());
i_currentNode = 0;
}
CreatureTraveller traveller(creature);
i_nextMoveTime.Update(diff);
if (i_destinationHolder.UpdateTraveller(traveller, diff, false, true))
{
if (!IsActive(creature)) // force stop processing (movement can move out active zone with cleanup movegens list)
return true; // not expire now, but already lost
}
// creature has been stopped in middle of the waypoint segment
if (!i_destinationHolder.HasArrived() && creature.IsStopped())
{
// Timer has elapsed, meaning this part controlled it
if (i_nextMoveTime.Passed())
{
SetStoppedByPlayer(false);
creature.addUnitState(UNIT_STAT_ROAMING_MOVE);
if (creature.CanFly())
creature.AddSplineFlag(SPLINEFLAG_UNKNOWN7);
// Now we re-set destination to same node and start travel
const WaypointNode &node = i_path->at(i_currentNode);
i_destinationHolder.SetDestination(traveller, node.x, node.y, node.z);
i_nextMoveTime.Reset(i_destinationHolder.GetTotalTravelTime());
}
else // if( !i_nextMoveTime.Passed())
{
// unexpected end of timer && creature stopped && not at end of segment
if (!IsStoppedByPlayer())
{
// Put 30 seconds delay
i_destinationHolder.IncreaseTravelTime(STOP_TIME_FOR_PLAYER);
i_nextMoveTime.Reset(STOP_TIME_FOR_PLAYER);
SetStoppedByPlayer(true); // Mark we did it
}
}
return true; // Abort here this update
}
if (creature.IsStopped())
{
if (!m_isArrivalDone)
{
if (i_path->at(i_currentNode).orientation != 100)
creature.SetOrientation(i_path->at(i_currentNode).orientation);
if (i_path->at(i_currentNode).script_id)
{
DEBUG_FILTER_LOG(LOG_FILTER_AI_AND_MOVEGENSS, "Creature movement start script %u at point %u for creature %u (entry %u).", i_path->at(i_currentNode).script_id, i_currentNode, creature.GetDBTableGUIDLow(), creature.GetEntry());
creature.GetMap()->ScriptsStart(sCreatureMovementScripts, i_path->at(i_currentNode).script_id, &creature, &creature);
}
// We have reached the destination and can process behavior
if (WaypointBehavior *behavior = i_path->at(i_currentNode).behavior)
{
if (behavior->emote != 0)
creature.HandleEmote(behavior->emote);
if (behavior->spell != 0)
{
creature.CastSpell(&creature, behavior->spell, false);
if (!IsActive(creature)) // force stop processing (cast can change movegens list)
return true; // not expire now, but already lost
}
if (behavior->model1 != 0)
creature.SetDisplayId(behavior->model1);
if (behavior->textid[0])
{
// Not only one text is set
if (behavior->textid[1])
{
// Select one from max 5 texts (0 and 1 already checked)
int i = 2;
for(; i < MAX_WAYPOINT_TEXT; ++i)
{
if (!behavior->textid[i])
break;
}
creature.Say(behavior->textid[rand() % i], 0, 0);
}
else
creature.Say(behavior->textid[0], 0, 0);
}
} // wpBehaviour found
// Can only do this once for the node
m_isArrivalDone = true;
// Inform script
MovementInform(creature);
if (!IsActive(creature)) // force stop processing (movement can move out active zone with cleanup movegens list)
return true; // not expire now, but already lost
// prevent a crash at empty waypoint path.
if (!i_path || i_path->empty() || i_currentNode >= i_path->size())
{
creature.clearUnitState(UNIT_STAT_ROAMING_MOVE);
return true;
}
}
} // i_creature.IsStopped()
// This is at the end of waypoint segment (incl. was previously stopped by player, extending the time)
if (i_nextMoveTime.Passed())
{
// If stopped then begin a new move segment
if (creature.IsStopped())
{
creature.addUnitState(UNIT_STAT_ROAMING_MOVE);
if (creature.CanFly())
creature.AddSplineFlag(SPLINEFLAG_UNKNOWN7);
if (WaypointBehavior *behavior = i_path->at(i_currentNode).behavior)
{
if (behavior->model2 != 0)
creature.SetDisplayId(behavior->model2);
creature.SetUInt32Value(UNIT_NPC_EMOTESTATE, 0);
}
// behavior for "departure" of the current node is done
m_isArrivalDone = false;
// Proceed with increment current node and then send to the next destination
++i_currentNode;
// Oops, end of the line so need to start from the beginning
if (i_currentNode >= i_path->size())
i_currentNode = 0;
if (i_path->at(i_currentNode).orientation != 100)
creature.SetOrientation(i_path->at(i_currentNode).orientation);
const WaypointNode &node = i_path->at(i_currentNode);
i_destinationHolder.SetDestination(traveller, node.x, node.y, node.z);
i_nextMoveTime.Reset(i_destinationHolder.GetTotalTravelTime());
}
else
{
// If not stopped then stop it
creature.clearUnitState(UNIT_STAT_ROAMING_MOVE);
SetStoppedByPlayer(false);
// Set TimeTracker to waittime for the current node
i_nextMoveTime.Reset(i_path->at(i_currentNode).delay);
}
}
return true;
}
void WaypointMovementGenerator<Creature>::MovementInform(Creature &creature)
{
if (creature.AI())
creature.AI()->MovementInform(WAYPOINT_MOTION_TYPE, i_currentNode);
}
bool WaypointMovementGenerator<Creature>::GetResetPosition(Creature&, float& x, float& y, float& z)
{
return PathMovementBase<Creature, WaypointPath const*>::GetPosition(x,y,z);
}
//----------------------------------------------------//
uint32 FlightPathMovementGenerator::GetPathAtMapEnd() const
{
if (i_currentNode >= i_path->size())
return i_path->size();
uint32 curMapId = (*i_path)[i_currentNode].mapid;
for(uint32 i = i_currentNode; i < i_path->size(); ++i)
{
if ((*i_path)[i].mapid != curMapId)
return i;
}
return i_path->size();
}
void FlightPathMovementGenerator::Initialize(Player &player)
{
Reset(player);
}
void FlightPathMovementGenerator::Finalize(Player & player)
{
// remove flag to prevent send object build movement packets for flight state and crash (movement generator already not at top of stack)
player.clearUnitState(UNIT_STAT_TAXI_FLIGHT);
float x, y, z;
i_destinationHolder.GetLocationNow(player.GetBaseMap(), x, y, z);
player.SetPosition(x, y, z, player.GetOrientation());
player.Unmount();
player.RemoveFlag(UNIT_FIELD_FLAGS,UNIT_FLAG_DISABLE_MOVE | UNIT_FLAG_TAXI_FLIGHT);
if(player.m_taxi.empty())
{
player.getHostileRefManager().setOnlineOfflineState(true);
if(player.pvpInfo.inHostileArea)
player.CastSpell(&player, 2479, true);
// update z position to ground and orientation for landing point
// this prevent cheating with landing point at lags
// when client side flight end early in comparison server side
player.StopMoving();
}
}
void FlightPathMovementGenerator::Interrupt(Player & player)
{
player.clearUnitState(UNIT_STAT_TAXI_FLIGHT);
}
void FlightPathMovementGenerator::Reset(Player & player)
{
player.getHostileRefManager().setOnlineOfflineState(false);
player.addUnitState(UNIT_STAT_TAXI_FLIGHT);
player.SetFlag(UNIT_FIELD_FLAGS,UNIT_FLAG_DISABLE_MOVE | UNIT_FLAG_TAXI_FLIGHT);
Traveller<Player> traveller(player);
// do not send movement, it was sent already
i_destinationHolder.SetDestination(traveller, (*i_path)[i_currentNode].x, (*i_path)[i_currentNode].y, (*i_path)[i_currentNode].z, false);
player.SendMonsterMoveByPath(GetPath(),GetCurrentNode(),GetPathAtMapEnd(), SplineFlags(SPLINEFLAG_WALKMODE|SPLINEFLAG_FLYING));
}
bool FlightPathMovementGenerator::Update(Player &player, const uint32 &diff)
{
if (MovementInProgress())
{
Traveller<Player> traveller(player);
if( i_destinationHolder.UpdateTraveller(traveller, diff, false) )
{
if (!IsActive(player)) // force stop processing (movement can move out active zone with cleanup movegens list)
return true; // not expire now, but already lost
i_destinationHolder.ResetUpdate(FLIGHT_TRAVEL_UPDATE);
if (i_destinationHolder.HasArrived())
{
DoEventIfAny(player,(*i_path)[i_currentNode],false);
uint32 curMap = (*i_path)[i_currentNode].mapid;
++i_currentNode;
if (MovementInProgress())
{
DoEventIfAny(player,(*i_path)[i_currentNode],true);
DEBUG_FILTER_LOG(LOG_FILTER_AI_AND_MOVEGENSS, "loading node %u for player %s", i_currentNode, player.GetName());
if ((*i_path)[i_currentNode].mapid == curMap)
{
// do not send movement, it was sent already
i_destinationHolder.SetDestination(traveller, (*i_path)[i_currentNode].x, (*i_path)[i_currentNode].y, (*i_path)[i_currentNode].z, false);
}
return true;
}
}
else
return true;
}
else
return true;
}
// we have arrived at the end of the path
return false;
}
void FlightPathMovementGenerator::SetCurrentNodeAfterTeleport()
{
if (i_path->empty())
return;
uint32 map0 = (*i_path)[0].mapid;
for (size_t i = 1; i < i_path->size(); ++i)
{
if ((*i_path)[i].mapid != map0)
{
i_currentNode = i;
return;
}
}
}
void FlightPathMovementGenerator::DoEventIfAny(Player& player, TaxiPathNodeEntry const& node, bool departure)
{
if (uint32 eventid = departure ? node.departureEventID : node.arrivalEventID)
{
DEBUG_FILTER_LOG(LOG_FILTER_AI_AND_MOVEGENSS, "Taxi %s event %u of node %u of path %u for player %s", departure ? "departure" : "arrival", eventid, node.index, node.path, player.GetName());
if (!Script->ProcessEventId(eventid, &player, &player, departure))
player.GetMap()->ScriptsStart(sEventScripts, eventid, &player, &player);
}
}
//
// Unique1's ASTAR Pathfinding Code... For future use & reference...
//
#ifdef __PATHFINDING__
int GetFCost(int to, int num, int parentNum, float *gcost); // Below...
int ShortenASTARRoute(short int *pathlist, int number)
{ // Wrote this to make the routes a little smarter (shorter)... No point looping back to the same places... Unique1
short int temppathlist[MAX_PATHLIST_NODES];
int count = 0;
// int count2 = 0;
int temp, temp2;
int link;
int upto = 0;
for (temp = number; temp >= 0; temp--)
{
qboolean shortened = qfalse;
for (temp2 = 0; temp2 < temp; temp2++)
{
for (link = 0; link < nodes[pathlist[temp]].enodenum; link++)
{
if (nodes[pathlist[temp]].links[link].flags & PATH_BLOCKED)
continue;
//if ((bot->client->ps.eFlags & EF_TANK) && nodes[bot->current_node].links[link].flags & PATH_NOTANKS) //if this path is blocked, skip it
// continue;
//if (nodes[nodes[pathlist[temp]].links[link].targetNode].origin[2] > nodes[pathlist[temp]].origin[2] + 32)
// continue;
if (nodes[pathlist[temp]].links[link].targetNode == pathlist[temp2])
{ // Found a shorter route...
//if (OrgVisible(nodes[pathlist[temp2]].origin, nodes[pathlist[temp]].origin, -1))
{
temppathlist[count] = pathlist[temp2];
temp = temp2;
++count;
shortened = qtrue;
}
}
}
}
if (!shortened)
{
temppathlist[count] = pathlist[temp];
++count;
}
}
upto = count;
for (temp = 0; temp < count; temp++)
{
pathlist[temp] = temppathlist[upto];
--upto;
}
G_Printf("ShortenASTARRoute: Path size reduced from %i to %i nodes...\n", number, count);
return count;
}
/*
===========================================================================
CreatePathAStar
This function uses the A* pathfinding algorithm to determine the
shortest path between any two nodes.
It's fairly complex, so I'm not really going to explain it much.
Look up A* and binary heaps for more info.
pathlist stores the ideal path between the nodes, in reverse order,
and the return value is the number of nodes in that path
===========================================================================
*/
int CreatePathAStar(gentity_t *bot, int from, int to, short int *pathlist)
{
//all the data we have to hold...since we can't do dynamic allocation, has to be MAX_NODES
//we can probably lower this later - eg, the open list should never have more than at most a few dozen items on it
short int openlist[MAX_NODES+1]; //add 1 because it's a binary heap, and they don't use 0 - 1 is the first used index
float gcost[MAX_NODES];
int fcost[MAX_NODES];
char list[MAX_NODES]; //0 is neither, 1 is open, 2 is closed - char because it's the smallest data type
short int parent[MAX_NODES];
short int numOpen = 0;
short int atNode, temp, newnode=-1;
qboolean found = qfalse;
int count = -1;
float gc;
int i, u, v, m;
vec3_t vec;
//clear out all the arrays
memset(openlist, 0, sizeof(short int)*(MAX_NODES+1));
memset(fcost, 0, sizeof(int)*MAX_NODES);
memset(list, 0, sizeof(char)*MAX_NODES);
memset(parent, 0, sizeof(short int)*MAX_NODES);
memset(gcost, -1, sizeof(float)*MAX_NODES);
//make sure we have valid data before calculating everything
if ((from == NODE_INVALID) || (to == NODE_INVALID) || (from >= MAX_NODES) || (to >= MAX_NODES) || (from == to))
return -1;
openlist[1] = from; //add the starting node to the open list
++numOpen;
gcost[from] = 0; //its f and g costs are obviously 0
fcost[from] = 0;
while (1)
{
if (numOpen != 0) //if there are still items in the open list
{
//pop the top item off of the list
atNode = openlist[1];
list[atNode] = 2; //put the node on the closed list so we don't check it again
--numOpen;
openlist[1] = openlist[numOpen+1]; //move the last item in the list to the top position
v = 1;
//this while loop reorders the list so that the new lowest fcost is at the top again
while (1)
{
u = v;
if ((2*u+1) < numOpen) //if both children exist
{
if (fcost[openlist[u]] >= fcost[openlist[2*u]])
v = 2*u;
if (fcost[openlist[v]] >= fcost[openlist[2*u+1]])
v = 2*u+1;
}
else
{
if ((2*u) < numOpen) //if only one child exists
{
if (fcost[openlist[u]] >= fcost[openlist[2*u]])
v = 2*u;
}
}
if (u != v) //if they're out of order, swap this item with its parent
{
temp = openlist[u];
openlist[u] = openlist[v];
openlist[v] = temp;
}
else
break;
}
for (i = 0; i < nodes[atNode].enodenum; ++i) //loop through all the links for this node
{
newnode = nodes[atNode].links[i].targetNode;
//if this path is blocked, skip it
if (nodes[atNode].links[i].flags & PATH_BLOCKED)
continue;
//if this path is blocked, skip it
if (bot->client && (bot->client->ps.eFlags & EF_TANK) && nodes[atNode].links[i].flags & PATH_NOTANKS)
continue;
//skip any unreachable nodes
if (bot->client && (nodes[newnode].type & NODE_ALLY_UNREACHABLE) && (bot->client->sess.sessionTeam == TEAM_ALLIES))
continue;
if (bot->client && (nodes[newnode].type & NODE_AXIS_UNREACHABLE) && (bot->client->sess.sessionTeam == TEAM_AXIS))
continue;
if (list[newnode] == 2) //if this node is on the closed list, skip it
continue;
if (list[newnode] != 1) //if this node is not already on the open list
{
openlist[++numOpen] = newnode; //add the new node to the open list
list[newnode] = 1;
parent[newnode] = atNode; //record the node's parent
if (newnode == to) //if we've found the goal, don't keep computing paths!
break; //this will break the 'for' and go all the way to 'if (list[to] == 1)'
//store it's f cost value
fcost[newnode] = GetFCost(to, newnode, parent[newnode], gcost);
//this loop re-orders the heap so that the lowest fcost is at the top
m = numOpen;
while (m != 1) //while this item isn't at the top of the heap already
{
//if it has a lower fcost than its parent
if (fcost[openlist[m]] <= fcost[openlist[m/2]])
{
temp = openlist[m/2];
openlist[m/2] = openlist[m];
openlist[m] = temp; //swap them
m /= 2;
}
else
break;
}
}
else //if this node is already on the open list
{
gc = gcost[atNode];
VectorSubtract(nodes[newnode].origin, nodes[atNode].origin, vec);
gc += VectorLength(vec); //calculate what the gcost would be if we reached this node along the current path
if (gc < gcost[newnode]) //if the new gcost is less (ie, this path is shorter than what we had before)
{
parent[newnode] = atNode; //set the new parent for this node
gcost[newnode] = gc; //and the new g cost
for (i = 1; i < numOpen; ++i) //loop through all the items on the open list
{
if (openlist[i] == newnode) //find this node in the list
{
//calculate the new fcost and store it
fcost[newnode] = GetFCost(to, newnode, parent[newnode], gcost);
//reorder the list again, with the lowest fcost item on top
m = i;
while (m != 1)
{
//if the item has a lower fcost than it's parent
if (fcost[openlist[m]] < fcost[openlist[m/2]])
{
temp = openlist[m/2];
openlist[m/2] = openlist[m];
openlist[m] = temp; //swap them
m /= 2;
}
else
break;
}
break; //exit the 'for' loop because we already changed this node
} //if
} //for
} //if (gc < gcost[newnode])
} //if (list[newnode] != 1) --> else
} //for (loop through links)
} //if (numOpen != 0)
else
{
found = qfalse; //there is no path between these nodes
break;
}
if (list[to] == 1) //if the destination node is on the open list, we're done
{
found = qtrue;
break;
}
} //while (1)
if (found == qtrue) //if we found a path
{
//G_Printf("%s - path found!\n", bot->client->pers.netname);
count = 0;
temp = to; //start at the end point
while (temp != from) //travel along the path (backwards) until we reach the starting point
{
pathlist[count++] = temp; //add the node to the pathlist and increment the count
temp = parent[temp]; //move to the parent of this node to continue the path
}
pathlist[count++] = from; //add the beginning node to the end of the pathlist
#ifdef __BOT_SHORTEN_ROUTING__
count = ShortenASTARRoute(pathlist, count); // This isn't working... Dunno why.. Unique1
#endif //__BOT_SHORTEN_ROUTING__
}
else
{
//G_Printf("^1*** ^4BOT DEBUG^5: (CreatePathAStar) There is no route between node ^7%i^5 and node ^7%i^5.\n", from, to);
count = CreateDumbRoute(from, to, pathlist);
if (count > 0)
{
#ifdef __BOT_SHORTEN_ROUTING__
count = ShortenASTARRoute(pathlist, count); // This isn't working... Dunno why.. Unique1
#endif //__BOT_SHORTEN_ROUTING__
return count;
}
}
return count; //return the number of nodes in the path, -1 if not found
}
/*
===========================================================================
GetFCost
Utility function used by A* pathfinding to calculate the
cost to move between nodes towards a goal. Using the A*
algorithm F = G + H, G here is the distance along the node
paths the bot must travel, and H is the straight-line distance
to the goal node.
Returned as an int because more precision is unnecessary and it
will slightly speed up heap access
===========================================================================
*/
int GetFCost(int to, int num, int parentNum, float *gcost)
{
float gc = 0;
float hc = 0;
vec3_t v;
if (gcost[num] == -1)
{
if (parentNum != -1)
{
gc = gcost[parentNum];
VectorSubtract(nodes[num].origin, nodes[parentNum].origin, v);
gc += VectorLength(v);
}
gcost[num] = gc;
}
else
gc = gcost[num];
VectorSubtract(nodes[to].origin, nodes[num].origin, v);
hc = VectorLength(v);
return (int)(gc + hc);
}
#endif //__PATHFINDING__
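/*
 * Worked example for GetFCost() above (invented numbers): if the path to a
 * node's parent already costs g = 120 units, the node lies 30 units from
 * that parent and 200 units (straight line) from the goal, then
 * g = 120 + 30 = 150, h = 200, and the node is queued with f = 350.
 * The open-list heap reordering in CreatePathAStar() keeps the smallest
 * such f on top.
 */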
| gpl-2.0 |
119/aircam-openwrt | package/lqtapi/src/mps/vmmc-cmds.h | 3783 | #ifndef __VMMC_CMDS_H__
#define __VMMC_CMDS_H__
#define _VMMC_CMD(_x) ((_x) << 24)
#define _VMMC_MOD(_x) ((_x) << 13)
#define _VMMC_ECMD(_x) ((_x) << 8)
#define _VMMC_MSG(_cmd, _mod, _ecmd) \
(_VMMC_CMD(_cmd) | _VMMC_ECMD(_ecmd) | _VMMC_MOD(_mod))
#define _VMMC_CHAN(_chan) ((_chan) << 16)
#define _VMMC_LENGTH(_length) ((_length) << 2)
#define VMMC_CMD_OPMODE(_chan) (_VMMC_MSG(1, 0, 0) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(1))
#define VMMC_CMD_SIG(_chan) (_VMMC_MSG(6, 2, 1) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(1))
#define VMMC_CMD_SIG_DATA(_enable, _event, _rate, _i1, _i2, _mute1, _mute2) \
(((_enable) << 31) | ((_event) << 30) | ((_i1) << 24) | ((_i2) << 16) | \
((_rate) << 23) | ((_mute1) << 15) | ((_mute2) << 14))
#define VMMC_CMD_SIG_SET_ENABLE(_data, _enable) (((_data) & ~BIT(31)) | ((_enable) << 31))
#define VMMC_CMD_SIG_SET_INPUTS(_data, _i1, _i2) (((_data) & ~0x3f3f0000) | \
((_i1) << 24) | ((_i2) << 16))
#define VMMC_CMD_DTMFR(_chan) (_VMMC_MSG(6, 2, 4) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(1))
#define VMMC_CMD_DTMFR_DATA(_enable, _event, _nr) \
(((_enable) << 31) | ((_event) << 30) | (6 << 27) | ((_nr) << 16))
#define VMMC_CMD_CODER(_chan) (_VMMC_MSG(6, 3, 1) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(4))
#define VMMC_CMD_CODER_DATA1(_enable, _rate, _ns, _pte, _nr, _i1, _hp, _pf, \
_cng, _bfi, _dec, _im, _pst, _sic, _em, _enc) \
(((_enable) << 31) | ((_rate) << 30) | ((_ns) << 29) | ((_pte) << 26) | \
((_nr) << 22) | ((_i1) << 16) | ((_hp) << 15) | ((_pf) << 14) | \
((_cng) << 13) | ((_bfi) << 12) | ((_dec) << 11) | ((_im) << 10) | \
((_pst) << 9) | ((_sic) << 8) | ((_em) << 7) | (_enc))
#define VMMC_CMD_CODER_DATA2(_gain1, _gain2) (((_gain1) << 16) | (_gain2))
#define VMMC_CMD_CODER_DATA3(_de, _ee, _i2, _red, _i3, _plc, _i4, _i5) \
(((_de) << 31) | ((_ee) << 30) | ((_i2) << 24) | ((_red) << 22) | \
((_i3) << 16) | ((_plc) << 15) | ((_i4) << 8) | (_i5))
#define VMMC_CMD_SERR_ACK(_chan) _VMMC_MSG(6, 7, 1) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(1)
#define VMMC_CMD_SERR_ACK_DATA1(_foo) ((_foo) << 22)
#define VMMC_CMD_CODER_DATA4(_tsf) ((_tsf) << 31)
#define VMMC_EVENT_ID_MASK (_VMMC_MSG(0x1f, 0x7, 0x1f) | 0xff)
#define VMMC_MSG_GET_CHAN(_msg) (((_msg) >> 16) & 0x1f)
#define VMMC_EVENT_HOOK_STATE(_data) ((_data) & 1)
#define VMMC_EVENT_HOOK_ID (_VMMC_MSG(9, 1, 1) | _VMMC_LENGTH(1))
#define VMMC_EVENT_DTMF_ID (_VMMC_MSG(9, 2, 0) | _VMMC_LENGTH(1))
#define VMMC_VOICE_DATA(_type, _chan, _len) (((_type) << 24) | ((_chan) << 16) \
| (_len))
#define VMMC_CMD_ALI(_chan) (_VMMC_MSG(6, 1, 1) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(3))
#define VMMC_CMD_ALI_DATA1(_enable, _rate, _ud, _eh, _eo, _i1, _dg1) \
(((_enable) << 31) | ((_rate) << 30) | ((_ud) << 29) | ((_eh) << 27) | \
((_eo) << 26) | ((_i1) << 16) | (_dg1))
#define VMMC_CMD_ALI_DATA2(_dg2, _i2, _i3) \
(((_dg2) << 16) | ((_i2) << 8) | (_i3))
#define VMMC_CMD_ALI_DATA3(_i4, _i5) \
(((_i4) << 24) | ((_i5) << 16))
#define VMMC_CMD_ALM_COEF(_chan, _offset, _len) \
(_VMMC_MSG(2, 0, _offset) | _VMMC_CHAN(_chan) | (_len))
#define CMD_VOICEREC_STATUS_PACKET 0x0
#define CMD_VOICEREC_DATA_PACKET 0x1
#define CMD_RTP_VOICE_DATA_PACKET 0x4
#define CMD_RTP_EVENT_PACKET 0x5
#define CMD_ADDRESS_PACKET 0x8
#define CMD_FAX_DATA_PACKET 0x10
#define CMD_FAX_STATUS_PACKET 0x11
#define CMD_P_PHONE_DATA_PACKET 0x12
#define CMD_P_PHONE_STATUS_PACKET 0x13
#define VMMC_CMD_RTP_CFG_US(_chan) \
(_VMMC_MSG(6, 3, 17) | _VMMC_CHAN(_chan) | (36))
#define VMMC_CMD_RTP_CFG_DS(_chan) \
(_VMMC_MSG(6, 3, 25) | _VMMC_CHAN(_chan) | (32))
#define VMMC_CMD_LEC(_chan) \
(_VMMC_MSG(6, 2, 1) | _VMMC_CHAN(_chan) | _VMMC_LENGTH(1))
// (_VMMC_MSG(CMD_EOP, ALI_LEC_ECMD, MOD_ALI) | _VMMC_CHAN(_chan) | (32))
#define VMMC_CMD_LEC_DATA()
#endif
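/*
 * Usage sketch (illustrative only, not referenced by the driver): how the
 * packing macros above compose into a command header word plus a payload
 * word.  The channel number and signal-array input indices are invented.
 */
#if 0
static void vmmc_example_build_sig_cmd(void)
{
	unsigned int chan = 2;			/* hypothetical channel */
	unsigned int head = VMMC_CMD_SIG(chan);	/* CMD 6, MOD 2, ECMD 1, length 1 */
	unsigned int data = VMMC_CMD_SIG_DATA(1 /* enable */, 1 /* event */,
					      0 /* rate */, 4, 5 /* i1, i2 */,
					      0, 0 /* mute1, mute2 */);

	/* inputs can later be rewired without rebuilding the other fields */
	data = VMMC_CMD_SIG_SET_INPUTS(data, 6, 5);

	/* a real caller would now hand head/data to its mailbox write helper */
	(void)head;
	(void)data;
}
#endif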
| gpl-2.0 |
rex-xxx/mt6572_x201 | dalvik/dx/src/com/android/dx/io/EncodedValueReader.java | 4797 | /*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.dx.io;
import com.android.dx.util.ByteInput;
import com.android.dx.util.Leb128Utils;
/**
* SAX-style reader for encoded values.
* TODO: convert this to a pull-style reader
*/
public class EncodedValueReader {
public static final int ENCODED_BYTE = 0x00;
public static final int ENCODED_SHORT = 0x02;
public static final int ENCODED_CHAR = 0x03;
public static final int ENCODED_INT = 0x04;
public static final int ENCODED_LONG = 0x06;
public static final int ENCODED_FLOAT = 0x10;
public static final int ENCODED_DOUBLE = 0x11;
public static final int ENCODED_STRING = 0x17;
public static final int ENCODED_TYPE = 0x18;
public static final int ENCODED_FIELD = 0x19;
public static final int ENCODED_ENUM = 0x1b;
public static final int ENCODED_METHOD = 0x1a;
public static final int ENCODED_ARRAY = 0x1c;
public static final int ENCODED_ANNOTATION = 0x1d;
public static final int ENCODED_NULL = 0x1e;
public static final int ENCODED_BOOLEAN = 0x1f;
protected final ByteInput in;
public EncodedValueReader(ByteInput in) {
this.in = in;
}
public EncodedValueReader(EncodedValue in) {
this(in.asByteInput());
}
public final void readArray() {
int size = Leb128Utils.readUnsignedLeb128(in);
visitArray(size);
for (int i = 0; i < size; i++) {
readValue();
}
}
public final void readAnnotation() {
int typeIndex = Leb128Utils.readUnsignedLeb128(in);
int size = Leb128Utils.readUnsignedLeb128(in);
visitAnnotation(typeIndex, size);
for (int i = 0; i < size; i++) {
visitAnnotationName(Leb128Utils.readUnsignedLeb128(in));
readValue();
}
}
public final void readValue() {
int argAndType = in.readByte() & 0xff;
int type = argAndType & 0x1f;
int arg = (argAndType & 0xe0) >> 5;
int size = arg + 1;
switch (type) {
case ENCODED_BYTE:
case ENCODED_SHORT:
case ENCODED_CHAR:
case ENCODED_INT:
case ENCODED_LONG:
case ENCODED_FLOAT:
case ENCODED_DOUBLE:
visitPrimitive(argAndType, type, arg, size);
break;
case ENCODED_STRING:
visitString(type, readIndex(in, size));
break;
case ENCODED_TYPE:
visitType(type, readIndex(in, size));
break;
case ENCODED_FIELD:
case ENCODED_ENUM:
visitField(type, readIndex(in, size));
break;
case ENCODED_METHOD:
visitMethod(type, readIndex(in, size));
break;
case ENCODED_ARRAY:
visitArrayValue(argAndType);
readArray();
break;
case ENCODED_ANNOTATION:
visitAnnotationValue(argAndType);
readAnnotation();
break;
case ENCODED_NULL:
visitEncodedNull(argAndType);
break;
case ENCODED_BOOLEAN:
visitEncodedBoolean(argAndType);
break;
}
}
protected void visitArray(int size) {}
protected void visitAnnotation(int typeIndex, int size) {}
protected void visitAnnotationName(int nameIndex) {}
protected void visitPrimitive(int argAndType, int type, int arg, int size) {
for (int i = 0; i < size; i++) {
in.readByte();
}
}
protected void visitString(int type, int index) {}
protected void visitType(int type, int index) {}
protected void visitField(int type, int index) {}
protected void visitMethod(int type, int index) {}
protected void visitArrayValue(int argAndType) {}
protected void visitAnnotationValue(int argAndType) {}
protected void visitEncodedBoolean(int argAndType) {}
protected void visitEncodedNull(int argAndType) {}
private int readIndex(ByteInput in, int byteCount) {
int result = 0;
int shift = 0;
for (int i = 0; i < byteCount; i++) {
result += (in.readByte() & 0xff) << shift;
shift += 8;
}
return result;
}
}
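/*
 * Usage sketch (illustrative; not part of the original class). Callers are
 * expected to subclass the reader, override the visit callbacks they care
 * about, and drive it with readValue(), readArray() or readAnnotation().
 * The collector below just records every string index it sees; its class
 * and field names are invented for the example.
 *
 *   class StringIndexCollector extends EncodedValueReader {
 *       final java.util.List<Integer> indexes = new java.util.ArrayList<Integer>();
 *
 *       StringIndexCollector(EncodedValue value) {
 *           super(value);
 *       }
 *
 *       @Override protected void visitString(int type, int index) {
 *           indexes.add(index);
 *       }
 *   }
 *
 *   // new StringIndexCollector(encodedArray).readArray() walks the encoded
 *   // array and fills 'indexes' with every string-pool reference it finds.
 */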
| gpl-2.0 |
SpoonLabs/astor | examples/lang_39/src/java/org/apache/commons/lang3/text/StrLookup.java | 5459 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.lang3.text;
import java.util.Map;
/**
* Lookup a String key to a String value.
* <p>
* This class represents the simplest form of a string to string map.
* It has a benefit over a map in that it can create the result on
* demand based on the key.
* <p>
* This class comes complete with various factory methods.
* If these do not suffice, you can subclass and implement your own matcher.
* <p>
* For example, it would be possible to implement a lookup that used the
* key as a primary key, and looked up the value on demand from the database.
*
* @author Apache Software Foundation
* @since 2.2
* @version $Id$
*/
public abstract class StrLookup<V> {
/**
* Lookup that always returns null.
*/
private static final StrLookup<?> NONE_LOOKUP;
/**
* Lookup that uses System properties.
*/
private static final StrLookup<Object> SYSTEM_PROPERTIES_LOOKUP;
static {
NONE_LOOKUP = new MapStrLookup(null);
StrLookup lookup = null;
try {
lookup = new MapStrLookup(System.getProperties());
} catch (SecurityException ex) {
lookup = NONE_LOOKUP;
}
SYSTEM_PROPERTIES_LOOKUP = lookup;
}
//-----------------------------------------------------------------------
/**
* Returns a lookup which always returns null.
*
* @return a lookup that always returns null, not null
*/
public static StrLookup<?> noneLookup() {
return NONE_LOOKUP;
}
/**
* Returns a lookup which uses {@link System#getProperties() System properties}
* to lookup the key to value.
* <p>
* If a security manager blocked access to system properties, then null will
* be returned from every lookup.
* <p>
* If a null key is used, this lookup will throw a NullPointerException.
*
* @return a lookup using system properties, not null
*/
public static StrLookup<Object> systemPropertiesLookup() {
return SYSTEM_PROPERTIES_LOOKUP;
}
/**
* Returns a lookup which looks up values using a map.
* <p>
* If the map is null, then null will be returned from every lookup.
* The map result object is converted to a string using toString().
*
* @param map the map of keys to values, may be null
* @return a lookup using the map, not null
*/
public static <V> StrLookup mapLookup(Map<String, V> map) {
return new MapStrLookup<V>(map);
}
//-----------------------------------------------------------------------
/**
* Constructor.
*/
protected StrLookup() {
super();
}
/**
* Looks up a String key to a String value.
* <p>
* The internal implementation may use any mechanism to return the value.
* The simplest implementation is to use a Map. However, virtually any
* implementation is possible.
* <p>
* For example, it would be possible to implement a lookup that used the
* key as a primary key, and looked up the value on demand from the database.
* Or, a numeric-based implementation could be created that treats the key
* as an integer, increments the value and returns the result as a string -
* converting 1 to 2, 15 to 16 etc.
*
* @param key the key to be looked up, may be null
* @return the matching value, null if no match
*/
public abstract String lookup(String key);
//-----------------------------------------------------------------------
/**
* Lookup implementation that uses a Map.
*/
    static class MapStrLookup<V> extends StrLookup<V> {
        /** Map of variable names to their values, may be null. */
private final Map<String, V> map;
/**
* Creates a new instance backed by a Map.
*
* @param map the map of keys to values, may be null
*/
MapStrLookup(Map<String, V> map) {
this.map = map;
}
/**
* Looks up a String key to a String value using the map.
* <p>
* If the map is null, then null is returned.
* The map result object is converted to a string using toString().
*
* @param key the key to be looked up, may be null
* @return the matching value, null if no match
*/
@Override
public String lookup(String key) {
if (map == null) {
return null;
}
Object obj = map.get(key);
if (obj == null) {
return null;
}
return obj.toString();
}
}
}
| gpl-2.0 |
scs/uclinux | user/blkfin-apps/mplayer/mplayer-svn-25211/libvo/vo_yuv4mpeg.c | 13603 | /*
* vo_yuv4mpeg.c, yuv4mpeg (mjpegtools) interface
*
* Thrown together by
* Robert Kesterson <[email protected]>
* Based on the pgm output plugin, the rgb2rgb postproc filter, divxdec,
* and probably others.
*
* This is undoubtedly incomplete, inaccurate, or just plain wrong. :-)
*
* 2002/06/19 Klaus Stengel <[email protected]>
* - added support for interlaced output
* Activate by using '-vo yuv4mpeg:interlaced'
* or '-vo yuv4mpeg:interlaced_bf' if your source has
* bottom fields first
* - added some additional checks to catch problems
*
* 2002/04/17 Juergen Hammelmann <[email protected]>
* - added support for output of subtitles
 *              works best if you also pass '-osdlevel 0' to MPlayer so the
 *              seek bar and timer are not rendered into the output
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include "config.h"
#include "subopt-helper.h"
#include "video_out.h"
#include "video_out_internal.h"
#include "mp_msg.h"
#include "help_mp.h"
#include "sub.h"
#include "fastmemcpy.h"
#include "libswscale/swscale.h"
#include "libswscale/rgb2rgb.h"
#include "libmpcodecs/vf_scale.h"
#include "libavutil/rational.h"
static vo_info_t info =
{
"yuv4mpeg output for mjpegtools",
"yuv4mpeg",
"Robert Kesterson <[email protected]>",
""
};
LIBVO_EXTERN (yuv4mpeg)
static int image_width = 0;
static int image_height = 0;
static float image_fps = 0;
static uint8_t *image = NULL;
static uint8_t *image_y = NULL;
static uint8_t *image_u = NULL;
static uint8_t *image_v = NULL;
static uint8_t *rgb_buffer = NULL;
static uint8_t *rgb_line_buffer = NULL;
static char *yuv_filename = NULL;
static int using_format = 0;
static FILE *yuv_out;
static int write_bytes;
#define Y4M_ILACE_NONE 'p' /* non-interlaced, progressive frame */
#define Y4M_ILACE_TOP_FIRST 't' /* interlaced, top-field first */
#define Y4M_ILACE_BOTTOM_FIRST 'b' /* interlaced, bottom-field first */
/* Set progressive mode as default */
static int config_interlace = Y4M_ILACE_NONE;
#define Y4M_IS_INTERLACED (config_interlace != Y4M_ILACE_NONE)
static int config(uint32_t width, uint32_t height, uint32_t d_width,
uint32_t d_height, uint32_t flags, char *title,
uint32_t format)
{
AVRational pixelaspect = av_div_q((AVRational){d_width, d_height},
(AVRational){width, height});
AVRational fps_frac = av_d2q(vo_fps, INT_MAX);
if (image_width == width && image_height == height &&
image_fps == vo_fps && vo_config_count)
return 0;
if (vo_config_count) {
mp_msg(MSGT_VO, MSGL_WARN,
"Video formats differ (w:%i=>%i, h:%i=>%i, fps:%f=>%f), "
"restarting output.\n",
image_width, width, image_height, height, image_fps, vo_fps);
uninit();
}
image_height = height;
image_width = width;
image_fps = vo_fps;
using_format = format;
if (Y4M_IS_INTERLACED)
{
if (height % 4)
{
mp_msg(MSGT_VO,MSGL_FATAL,
MSGTR_VO_YUV4MPEG_InterlacedHeightDivisibleBy4);
return -1;
}
rgb_line_buffer = malloc(image_width * 3);
if (!rgb_line_buffer)
{
mp_msg(MSGT_VO,MSGL_FATAL,
MSGTR_VO_YUV4MPEG_InterlacedLineBufAllocFail);
return -1;
}
if (using_format == IMGFMT_YV12)
mp_msg(MSGT_VO,MSGL_WARN,
MSGTR_VO_YUV4MPEG_InterlacedInputNotRGB);
}
if (width % 2)
{
mp_msg(MSGT_VO,MSGL_FATAL,
MSGTR_VO_YUV4MPEG_WidthDivisibleBy2);
return -1;
}
if(using_format != IMGFMT_YV12)
{
sws_rgb2rgb_init(get_sws_cpuflags());
rgb_buffer = malloc(image_width * image_height * 3);
if (!rgb_buffer)
{
mp_msg(MSGT_VO,MSGL_FATAL,
MSGTR_VO_YUV4MPEG_NoMemRGBFrameBuf);
return -1;
}
}
write_bytes = image_width * image_height * 3 / 2;
image = malloc(write_bytes);
yuv_out = fopen(yuv_filename, "wb");
if (!yuv_out || image == 0)
{
mp_msg(MSGT_VO,MSGL_FATAL,
MSGTR_VO_YUV4MPEG_OutFileOpenError,
yuv_filename);
return -1;
}
image_y = image;
image_u = image_y + image_width * image_height;
image_v = image_u + image_width * image_height / 4;
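    /* Editorial note: for a 720x576 stream at 25 fps with square pixels and
     * progressive output this emits a stream header roughly like
     *   YUV4MPEG2 W720 H576 F25:1 Ip A1:1
     * followed by one "FRAME" marker before each picture. */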
fprintf(yuv_out, "YUV4MPEG2 W%d H%d F%d:%d I%c A%d:%d\n",
image_width, image_height, fps_frac.num, fps_frac.den,
config_interlace,
pixelaspect.num, pixelaspect.den);
fflush(yuv_out);
return 0;
}
/* Only use when h is divisible by 2! */
static void swap_fields(uint8_t *ptr, const int h, const int stride)
{
int i;
for (i=0; i<h; i +=2)
{
fast_memcpy(rgb_line_buffer , ptr + stride * i , stride);
fast_memcpy(ptr + stride * i , ptr + stride * (i+1), stride);
fast_memcpy(ptr + stride * (i+1), rgb_line_buffer , stride);
}
}
static void draw_alpha(int x0, int y0, int w, int h, unsigned char *src,
unsigned char *srca, int stride) {
switch (using_format)
{
case IMGFMT_YV12:
vo_draw_alpha_yv12(w, h, src, srca, stride,
image + y0 * image_width + x0, image_width);
break;
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
if (config_interlace != Y4M_ILACE_BOTTOM_FIRST)
vo_draw_alpha_rgb24(w, h, src, srca, stride,
rgb_buffer + (y0 * image_width + x0) * 3, image_width * 3);
else
{
swap_fields (rgb_buffer, image_height, image_width * 3);
vo_draw_alpha_rgb24(w, h, src, srca, stride,
rgb_buffer + (y0 * image_width + x0) * 3, image_width * 3);
swap_fields (rgb_buffer, image_height, image_width * 3);
}
break;
}
}
static void draw_osd(void)
{
vo_draw_text(image_width, image_height, draw_alpha);
}
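/* Editorial note on deinterleave_fields() below: it reorders the interleaved
 * field lines (t0 b0 t1 b1 ...) into two stacked fields (t0 t1 ... b0 b1 ...)
 * in place, following the permutation cycles with the single spare line in
 * rgb_line_buffer instead of allocating a second full frame. */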
static void deinterleave_fields(uint8_t *ptr, const int stride,
const int img_height)
{
unsigned int i, j, k_start = 1, modv = img_height - 1;
unsigned char *line_state = malloc(modv);
for (i=0; i<modv; i++)
line_state[i] = 0;
line_state[0] = 1;
while(k_start < modv)
{
i = j = k_start;
fast_memcpy(rgb_line_buffer, ptr + stride * i, stride);
while (!line_state[j])
{
line_state[j] = 1;
i = j;
j = j * 2 % modv;
fast_memcpy(ptr + stride * i, ptr + stride * j, stride);
}
fast_memcpy(ptr + stride * i, rgb_line_buffer, stride);
while(k_start < modv && line_state[k_start])
k_start++;
}
free(line_state);
}
static void vo_y4m_write(const void *ptr, const size_t num_bytes)
{
if (fwrite(ptr, 1, num_bytes, yuv_out) != num_bytes)
mp_msg(MSGT_VO,MSGL_ERR,
MSGTR_VO_YUV4MPEG_OutFileWriteError);
}
static int write_last_frame(void)
{
uint8_t *upper_y, *upper_u, *upper_v, *rgb_buffer_lower;
int rgb_stride, uv_stride, field_height;
unsigned int i, low_ofs;
fprintf(yuv_out, "FRAME\n");
if (using_format != IMGFMT_YV12)
{
rgb_stride = image_width * 3;
uv_stride = image_width / 2;
if (Y4M_IS_INTERLACED)
{
field_height = image_height / 2;
upper_y = image;
upper_u = upper_y + image_width * field_height;
upper_v = upper_u + image_width * field_height / 4;
low_ofs = image_width * field_height * 3 / 2;
rgb_buffer_lower = rgb_buffer + rgb_stride * field_height;
/* Write Y plane */
for(i = 0; i < field_height; i++)
{
vo_y4m_write(upper_y + image_width * i, image_width);
vo_y4m_write(upper_y + image_width * i + low_ofs, image_width);
}
/* Write U and V plane */
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_u + uv_stride * i, uv_stride);
vo_y4m_write(upper_u + uv_stride * i + low_ofs, uv_stride);
}
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_v + uv_stride * i, uv_stride);
vo_y4m_write(upper_v + uv_stride * i + low_ofs, uv_stride);
}
return VO_TRUE; /* Image written; We have to stop here */
}
}
/* Write progressive frame */
vo_y4m_write(image, write_bytes);
return VO_TRUE;
}
static void flip_page (void)
{
uint8_t *upper_y, *upper_u, *upper_v, *rgb_buffer_lower;
int rgb_stride, uv_stride, field_height;
unsigned int i, low_ofs;
fprintf(yuv_out, "FRAME\n");
if (using_format != IMGFMT_YV12)
{
rgb_stride = image_width * 3;
uv_stride = image_width / 2;
if (Y4M_IS_INTERLACED)
{
field_height = image_height / 2;
upper_y = image;
upper_u = upper_y + image_width * field_height;
upper_v = upper_u + image_width * field_height / 4;
low_ofs = image_width * field_height * 3 / 2;
rgb_buffer_lower = rgb_buffer + rgb_stride * field_height;
deinterleave_fields(rgb_buffer, rgb_stride, image_height);
rgb24toyv12(rgb_buffer, upper_y, upper_u, upper_v,
image_width, field_height,
image_width, uv_stride, rgb_stride);
rgb24toyv12(rgb_buffer_lower, upper_y + low_ofs,
upper_u + low_ofs, upper_v + low_ofs,
image_width, field_height,
image_width, uv_stride, rgb_stride);
/* Write Y plane */
for(i = 0; i < field_height; i++)
{
vo_y4m_write(upper_y + image_width * i, image_width);
vo_y4m_write(upper_y + image_width * i + low_ofs, image_width);
}
/* Write U and V plane */
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_u + uv_stride * i, uv_stride);
vo_y4m_write(upper_u + uv_stride * i + low_ofs, uv_stride);
}
for(i = 0; i < field_height / 2; i++)
{
vo_y4m_write(upper_v + uv_stride * i, uv_stride);
vo_y4m_write(upper_v + uv_stride * i + low_ofs, uv_stride);
}
return; /* Image written; We have to stop here */
}
rgb24toyv12(rgb_buffer, image_y, image_u, image_v,
image_width, image_height,
image_width, uv_stride, rgb_stride);
}
/* Write progressive frame */
vo_y4m_write(image, write_bytes);
}
static int draw_slice(uint8_t *srcimg[], int stride[], int w,int h,int x,int y)
{
int i;
uint8_t *dst, *src = srcimg[0];
switch (using_format)
{
case IMGFMT_YV12:
// copy Y:
dst = image_y + image_width * y + x;
for (i = 0; i < h; i++)
{
fast_memcpy(dst, src, w);
src += stride[0];
dst += image_width;
}
{
// copy U + V:
int imgstride = image_width >> 1;
uint8_t *src1 = srcimg[1];
uint8_t *src2 = srcimg[2];
uint8_t *dstu = image_u + imgstride * (y >> 1) + (x >> 1);
uint8_t *dstv = image_v + imgstride * (y >> 1) + (x >> 1);
for (i = 0; i < h / 2; i++)
{
fast_memcpy(dstu, src1 , w >> 1);
fast_memcpy(dstv, src2, w >> 1);
src1 += stride[1];
src2 += stride[2];
dstu += imgstride;
dstv += imgstride;
}
}
break;
case IMGFMT_BGR24:
case IMGFMT_RGB24:
dst = rgb_buffer + (image_width * y + x) * 3;
for (i = 0; i < h; i++)
{
fast_memcpy(dst, src, w * 3);
src += stride[0];
dst += image_width * 3;
}
break;
}
return 0;
}
static int draw_frame(uint8_t * src[])
{
switch(using_format)
{
case IMGFMT_YV12:
// gets done in draw_slice
break;
case IMGFMT_BGR24:
case IMGFMT_RGB24:
fast_memcpy(rgb_buffer, src[0], image_width * image_height * 3);
break;
}
return 0;
}
static int query_format(uint32_t format)
{
if (Y4M_IS_INTERLACED)
{
/* When processing interlaced material we want to get the raw RGB
* data and do the YV12 conversion ourselves to have the chrominance
     * information sampled correctly. */
switch(format)
{
case IMGFMT_YV12:
return VFCAP_CSP_SUPPORTED|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
}
}
else
{
switch(format)
{
case IMGFMT_YV12:
return VFCAP_CSP_SUPPORTED|VFCAP_CSP_SUPPORTED_BY_HW|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
case IMGFMT_BGR|24:
case IMGFMT_RGB|24:
return VFCAP_CSP_SUPPORTED|VFCAP_OSD|VFCAP_ACCEPT_STRIDE;
}
}
return 0;
}
// WARNING: config(...) also uses this
static void uninit(void)
{
if(image)
free(image);
image = NULL;
if(yuv_out)
fclose(yuv_out);
yuv_out = NULL;
if(rgb_buffer)
free(rgb_buffer);
rgb_buffer = NULL;
if(rgb_line_buffer)
free(rgb_line_buffer);
rgb_line_buffer = NULL;
if (yuv_filename)
free(yuv_filename);
yuv_filename = NULL;
image_width = 0;
image_height = 0;
image_fps = 0;
}
static void check_events(void)
{
}
static int preinit(const char *arg)
{
int il, il_bf;
opt_t subopts[] = {
{"interlaced", OPT_ARG_BOOL, &il, NULL},
{"interlaced_bf", OPT_ARG_BOOL, &il_bf, NULL},
{"file", OPT_ARG_MSTRZ, &yuv_filename, NULL},
{NULL}
};
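    /* Editorial note: with these suboptions a command line such as
     *   -vo yuv4mpeg:file=out.y4m:interlaced_bf
     * writes a bottom-field-first interlaced stream to out.y4m instead of
     * the default stream.yuv. */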
il = 0;
il_bf = 0;
yuv_filename = strdup("stream.yuv");
if (subopt_parse(arg, subopts) != 0) {
mp_msg(MSGT_VO, MSGL_FATAL, MSGTR_VO_YUV4MPEG_UnknownSubDev, arg);
return -1;
}
config_interlace = Y4M_ILACE_NONE;
if (il)
config_interlace = Y4M_ILACE_TOP_FIRST;
if (il_bf)
config_interlace = Y4M_ILACE_BOTTOM_FIRST;
/* Inform user which output mode is used */
switch (config_interlace)
{
case Y4M_ILACE_TOP_FIRST:
mp_msg(MSGT_VO,MSGL_STATUS,
MSGTR_VO_YUV4MPEG_InterlacedTFFMode);
break;
case Y4M_ILACE_BOTTOM_FIRST:
mp_msg(MSGT_VO,MSGL_STATUS,
MSGTR_VO_YUV4MPEG_InterlacedBFFMode);
break;
default:
mp_msg(MSGT_VO,MSGL_STATUS,
MSGTR_VO_YUV4MPEG_ProgressiveMode);
break;
}
return 0;
}
static int control(uint32_t request, void *data, ...)
{
switch (request) {
case VOCTRL_QUERY_FORMAT:
return query_format(*((uint32_t*)data));
case VOCTRL_DUPLICATE_FRAME:
return write_last_frame();
}
return VO_NOTIMPL;
}
| gpl-2.0 |
RAZAW/social-media-linkedin | mingw64/share/doc/git-doc/git-revert.html | 36792 | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<!--[if IE]><meta http-equiv="X-UA-Compatible" content="IE=edge"><![endif]-->
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="Asciidoctor 1.5.3">
<title>git-revert(1)</title>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700">
<style>
/* Asciidoctor default stylesheet | MIT License | http://asciidoctor.org */
/* Remove comment around @import statement below when using as a custom stylesheet */
/*@import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700";*/
article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}
audio,canvas,video{display:inline-block}
audio:not([controls]){display:none;height:0}
[hidden],template{display:none}
script{display:none!important}
html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}
body{margin:0}
a{background:transparent}
a:focus{outline:thin dotted}
a:active,a:hover{outline:0}
h1{font-size:2em;margin:.67em 0}
abbr[title]{border-bottom:1px dotted}
b,strong{font-weight:bold}
dfn{font-style:italic}
hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}
mark{background:#ff0;color:#000}
code,kbd,pre,samp{font-family:monospace;font-size:1em}
pre{white-space:pre-wrap}
q{quotes:"\201C" "\201D" "\2018" "\2019"}
small{font-size:80%}
sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}
sup{top:-.5em}
sub{bottom:-.25em}
img{border:0}
svg:not(:root){overflow:hidden}
figure{margin:0}
fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}
legend{border:0;padding:0}
button,input,select,textarea{font-family:inherit;font-size:100%;margin:0}
button,input{line-height:normal}
button,select{text-transform:none}
button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer}
button[disabled],html input[disabled]{cursor:default}
input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0}
input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}
input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}
button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}
textarea{overflow:auto;vertical-align:top}
table{border-collapse:collapse;border-spacing:0}
*,*:before,*:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}
html,body{font-size:100%}
body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu Serif",serif;font-weight:400;font-style:normal;line-height:1;position:relative;cursor:auto}
a:hover{cursor:pointer}
img,object,embed{max-width:100%;height:auto}
object,embed{height:100%}
img{-ms-interpolation-mode:bicubic}
.left{float:left!important}
.right{float:right!important}
.text-left{text-align:left!important}
.text-right{text-align:right!important}
.text-center{text-align:center!important}
.text-justify{text-align:justify!important}
.hide{display:none}
body{-webkit-font-smoothing:antialiased}
img,object,svg{display:inline-block;vertical-align:middle}
textarea{height:auto;min-height:50px}
select{width:100%}
.center{margin-left:auto;margin-right:auto}
.spread{width:100%}
p.lead,.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{font-size:1.21875em;line-height:1.6}
.subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em}
div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0;direction:ltr}
a{color:#2156a5;text-decoration:underline;line-height:inherit}
a:hover,a:focus{color:#1d4b8f}
a img{border:none}
p{font-family:inherit;font-weight:400;font-size:1em;line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility}
p aside{font-size:.875em;line-height:1.35;font-style:italic}
h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#ba3925;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em}
h1 small,h2 small,h3 small,#toctitle small,.sidebarblock>.content>.title small,h4 small,h5 small,h6 small{font-size:60%;color:#e99b8f;line-height:0}
h1{font-size:2.125em}
h2{font-size:1.6875em}
h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em}
h4,h5{font-size:1.125em}
h6{font-size:1em}
hr{border:solid #ddddd8;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0}
em,i{font-style:italic;line-height:inherit}
strong,b{font-weight:bold;line-height:inherit}
small{font-size:60%;line-height:inherit}
code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)}
ul,ol,dl{font-size:1em;line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit}
ul,ol,ul.no-bullet,ol.no-bullet{margin-left:1.5em}
ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0;font-size:1em}
ul.square li ul,ul.circle li ul,ul.disc li ul{list-style:inherit}
ul.square{list-style-type:square}
ul.circle{list-style-type:circle}
ul.disc{list-style-type:disc}
ul.no-bullet{list-style:none}
ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0}
dl dt{margin-bottom:.3125em;font-weight:bold}
dl dd{margin-bottom:1.25em}
abbr,acronym{text-transform:uppercase;font-size:90%;color:rgba(0,0,0,.8);border-bottom:1px dotted #ddd;cursor:help}
abbr{text-transform:none}
blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd}
blockquote cite{display:block;font-size:.9375em;color:rgba(0,0,0,.6)}
blockquote cite:before{content:"\2014 \0020"}
blockquote cite a,blockquote cite a:visited{color:rgba(0,0,0,.6)}
blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)}
@media only screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2}
h1{font-size:2.75em}
h2{font-size:2.3125em}
h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em}
h4{font-size:1.4375em}}
table{background:#fff;margin-bottom:1.25em;border:solid 1px #dedede}
table thead,table tfoot{background:#f7f8f7;font-weight:bold}
table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left}
table tr th,table tr td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)}
table tr.even,table tr.alt,table tr:nth-of-type(even){background:#f8f8f7}
table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{display:table-cell;line-height:1.6}
body{tab-size:4}
h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em}
h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400}
.clearfix:before,.clearfix:after,.float-group:before,.float-group:after{content:" ";display:table}
.clearfix:after,.float-group:after{clear:both}
*:not(pre)>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background-color:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed}
pre,pre>code{line-height:1.45;color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;text-rendering:optimizeSpeed}
.keyseq{color:rgba(51,51,51,.8)}
kbd{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;display:inline-block;color:rgba(0,0,0,.8);font-size:.65em;line-height:1.45;background-color:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:0 .15em;padding:.2em .5em;vertical-align:middle;position:relative;top:-.1em;white-space:nowrap}
.keyseq kbd:first-child{margin-left:0}
.keyseq kbd:last-child{margin-right:0}
.menuseq,.menu{color:rgba(0,0,0,.8)}
b.button:before,b.button:after{position:relative;top:-1px;font-weight:400}
b.button:before{content:"[";padding:0 3px 0 2px}
b.button:after{content:"]";padding:0 2px 0 3px}
p a>code:hover{color:rgba(0,0,0,.9)}
#header,#content,#footnotes,#footer{width:100%;margin-left:auto;margin-right:auto;margin-top:0;margin-bottom:0;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em}
#header:before,#header:after,#content:before,#content:after,#footnotes:before,#footnotes:after,#footer:before,#footer:after{content:" ";display:table}
#header:after,#content:after,#footnotes:after,#footer:after{clear:both}
#content{margin-top:1.25em}
#content:before{content:none}
#header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0}
#header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #ddddd8}
#header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #ddddd8;padding-bottom:8px}
#header .details{border-bottom:1px solid #ddddd8;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap}
#header .details span:first-child{margin-left:-.125em}
#header .details span.email a{color:rgba(0,0,0,.85)}
#header .details br{display:none}
#header .details br+span:before{content:"\00a0\2013\00a0"}
#header .details br+span.author:before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)}
#header .details br+span#revremark:before{content:"\00a0|\00a0"}
#header #revnumber{text-transform:capitalize}
#header #revnumber:after{content:"\00a0"}
#content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #ddddd8;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem}
#toc{border-bottom:1px solid #efefed;padding-bottom:.5em}
#toc>ul{margin-left:.125em}
#toc ul.sectlevel0>li>a{font-style:italic}
#toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0}
#toc ul{font-family:"Open Sans","DejaVu Sans",sans-serif;list-style-type:none}
#toc li{line-height:1.3334;margin-top:.3334em}
#toc a{text-decoration:none}
#toc a:active{text-decoration:underline}
#toctitle{color:#7a2518;font-size:1.2em}
@media only screen and (min-width:768px){#toctitle{font-size:1.375em}
body.toc2{padding-left:15em;padding-right:0}
#toc.toc2{margin-top:0!important;background-color:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #efefed;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto}
#toc.toc2 #toctitle{margin-top:0;margin-bottom:.8rem;font-size:1.2em}
#toc.toc2>ul{font-size:.9em;margin-bottom:0}
#toc.toc2 ul ul{margin-left:0;padding-left:1em}
#toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em}
body.toc2.toc-right{padding-left:0;padding-right:15em}
body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #efefed;left:auto;right:0}}
@media only screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0}
#toc.toc2{width:20em}
#toc.toc2 #toctitle{font-size:1.375em}
#toc.toc2>ul{font-size:.95em}
#toc.toc2 ul ul{padding-left:1.25em}
body.toc2.toc-right{padding-left:0;padding-right:20em}}
#content #toc{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px}
#content #toc>:first-child{margin-top:0}
#content #toc>:last-child{margin-bottom:0}
#footer{max-width:100%;background-color:rgba(0,0,0,.8);padding:1.25em}
#footer-text{color:rgba(255,255,255,.8);line-height:1.44}
.sect1{padding-bottom:.625em}
@media only screen and (min-width:768px){.sect1{padding-bottom:1.25em}}
.sect1+.sect1{border-top:1px solid #efefed}
#content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400}
#content h1>a.anchor:before,h2>a.anchor:before,h3>a.anchor:before,#toctitle>a.anchor:before,.sidebarblock>.content>.title>a.anchor:before,h4>a.anchor:before,h5>a.anchor:before,h6>a.anchor:before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em}
#content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible}
#content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#ba3925;text-decoration:none}
#content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221}
.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em}
.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic}
table.tableblock>caption.title{white-space:nowrap;overflow:visible;max-width:0}
.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{color:rgba(0,0,0,.85)}
table.tableblock #preamble>.sectionbody>.paragraph:first-of-type p{font-size:inherit}
.admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%}
.admonitionblock>table td.icon{text-align:center;width:80px}
.admonitionblock>table td.icon img{max-width:none}
.admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase}
.admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #ddddd8;color:rgba(0,0,0,.6)}
.admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0}
.exampleblock>.content{border-style:solid;border-width:1px;border-color:#e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;-webkit-border-radius:4px;border-radius:4px}
.exampleblock>.content>:first-child{margin-top:0}
.exampleblock>.content>:last-child{margin-bottom:0}
.sidebarblock{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px}
.sidebarblock>:first-child{margin-top:0}
.sidebarblock>:last-child{margin-bottom:0}
.sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center}
.exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content .qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0}
.literalblock pre,.listingblock pre:not(.highlight),.listingblock pre[class="highlight"],.listingblock pre[class^="highlight "],.listingblock pre.CodeRay,.listingblock pre.prettyprint{background:#f7f7f8}
.sidebarblock .literalblock pre,.sidebarblock .listingblock pre:not(.highlight),.sidebarblock .listingblock pre[class="highlight"],.sidebarblock .listingblock pre[class^="highlight "],.sidebarblock .listingblock pre.CodeRay,.sidebarblock .listingblock pre.prettyprint{background:#f2f1f1}
.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;padding:1em;font-size:.8125em}
.literalblock pre.nowrap,.literalblock pre[class].nowrap,.listingblock pre.nowrap,.listingblock pre[class].nowrap{overflow-x:auto;white-space:pre;word-wrap:normal}
@media only screen and (min-width:768px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:.90625em}}
@media only screen and (min-width:1280px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:1em}}
.literalblock.output pre{color:#f7f7f8;background-color:rgba(0,0,0,.9)}
.listingblock pre.highlightjs{padding:0}
.listingblock pre.highlightjs>code{padding:1em;-webkit-border-radius:4px;border-radius:4px}
.listingblock pre.prettyprint{border-width:0}
.listingblock>.content{position:relative}
.listingblock code[data-lang]:before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:#999}
.listingblock:hover code[data-lang]:before{display:block}
.listingblock.terminal pre .command:before{content:attr(data-prompt);padding-right:.5em;color:#999}
.listingblock.terminal pre .command:not([data-prompt]):before{content:"$"}
table.pyhltable{border-collapse:separate;border:0;margin-bottom:0;background:none}
table.pyhltable td{vertical-align:top;padding-top:0;padding-bottom:0;line-height:1.45}
table.pyhltable td.code{padding-left:.75em;padding-right:0}
pre.pygments .lineno,table.pyhltable td:not(.code){color:#999;padding-left:0;padding-right:.5em;border-right:1px solid #ddddd8}
pre.pygments .lineno{display:inline-block;margin-right:.25em}
table.pyhltable .linenodiv{background:none!important;padding-right:0!important}
.quoteblock{margin:0 1em 1.25em 1.5em;display:table}
.quoteblock>.title{margin-left:-1.5em;margin-bottom:.75em}
.quoteblock blockquote,.quoteblock blockquote p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify}
.quoteblock blockquote{margin:0;padding:0;border:0}
.quoteblock blockquote:before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)}
.quoteblock blockquote>.paragraph:last-child p{margin-bottom:0}
.quoteblock .attribution{margin-top:.5em;margin-right:.5ex;text-align:right}
.quoteblock .quoteblock{margin-left:0;margin-right:0;padding:.5em 0;border-left:3px solid rgba(0,0,0,.6)}
.quoteblock .quoteblock blockquote{padding:0 0 0 .75em}
.quoteblock .quoteblock blockquote:before{display:none}
.verseblock{margin:0 1em 1.25em 1em}
.verseblock pre{font-family:"Open Sans","DejaVu Sans",sans;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility}
.verseblock pre strong{font-weight:400}
.verseblock .attribution{margin-top:1.25rem;margin-left:.5ex}
.quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic}
.quoteblock .attribution br,.verseblock .attribution br{display:none}
.quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.025em;color:rgba(0,0,0,.6)}
.quoteblock.abstract{margin:0 0 1.25em 0;display:block}
.quoteblock.abstract blockquote,.quoteblock.abstract blockquote p{text-align:left;word-spacing:0}
.quoteblock.abstract blockquote:before,.quoteblock.abstract blockquote p:first-of-type:before{display:none}
table.tableblock{max-width:100%;border-collapse:separate}
table.tableblock td>.paragraph:last-child p>p:last-child,table.tableblock th>p:last-child,table.tableblock td>p:last-child{margin-bottom:0}
table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede}
table.grid-all th.tableblock,table.grid-all td.tableblock{border-width:0 1px 1px 0}
table.grid-all tfoot>tr>th.tableblock,table.grid-all tfoot>tr>td.tableblock{border-width:1px 1px 0 0}
table.grid-cols th.tableblock,table.grid-cols td.tableblock{border-width:0 1px 0 0}
table.grid-all *>tr>.tableblock:last-child,table.grid-cols *>tr>.tableblock:last-child{border-right-width:0}
table.grid-rows th.tableblock,table.grid-rows td.tableblock{border-width:0 0 1px 0}
table.grid-all tbody>tr:last-child>th.tableblock,table.grid-all tbody>tr:last-child>td.tableblock,table.grid-all thead:last-child>tr>th.tableblock,table.grid-rows tbody>tr:last-child>th.tableblock,table.grid-rows tbody>tr:last-child>td.tableblock,table.grid-rows thead:last-child>tr>th.tableblock{border-bottom-width:0}
table.grid-rows tfoot>tr>th.tableblock,table.grid-rows tfoot>tr>td.tableblock{border-width:1px 0 0 0}
table.frame-all{border-width:1px}
table.frame-sides{border-width:0 1px}
table.frame-topbot{border-width:1px 0}
th.halign-left,td.halign-left{text-align:left}
th.halign-right,td.halign-right{text-align:right}
th.halign-center,td.halign-center{text-align:center}
th.valign-top,td.valign-top{vertical-align:top}
th.valign-bottom,td.valign-bottom{vertical-align:bottom}
th.valign-middle,td.valign-middle{vertical-align:middle}
table thead th,table tfoot th{font-weight:bold}
tbody tr th{display:table-cell;line-height:1.6;background:#f7f8f7}
tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold}
p.tableblock>code:only-child{background:none;padding:0}
p.tableblock{font-size:1em}
td>div.verse{white-space:pre}
ol{margin-left:1.75em}
ul li ol{margin-left:1.5em}
dl dd{margin-left:1.125em}
dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0}
ol>li p,ul>li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em}
ul.unstyled,ol.unnumbered,ul.checklist,ul.none{list-style-type:none}
ul.unstyled,ol.unnumbered,ul.checklist{margin-left:.625em}
ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1em;font-size:.85em}
ul.checklist li>p:first-child>input[type="checkbox"]:first-child{width:1em;position:relative;top:1px}
ul.inline{margin:0 auto .625em auto;margin-left:-1.375em;margin-right:0;padding:0;list-style:none;overflow:hidden}
ul.inline>li{list-style:none;float:left;margin-left:1.375em;display:block}
ul.inline>li>*{display:block}
.unstyled dl dt{font-weight:400;font-style:normal}
ol.arabic{list-style-type:decimal}
ol.decimal{list-style-type:decimal-leading-zero}
ol.loweralpha{list-style-type:lower-alpha}
ol.upperalpha{list-style-type:upper-alpha}
ol.lowerroman{list-style-type:lower-roman}
ol.upperroman{list-style-type:upper-roman}
ol.lowergreek{list-style-type:lower-greek}
.hdlist>table,.colist>table{border:0;background:none}
.hdlist>table>tbody>tr,.colist>table>tbody>tr{background:none}
td.hdlist1,td.hdlist2{vertical-align:top;padding:0 .625em}
td.hdlist1{font-weight:bold;padding-bottom:1.25em}
.literalblock+.colist,.listingblock+.colist{margin-top:-.5em}
.colist>table tr>td:first-of-type{padding:0 .75em;line-height:1}
.colist>table tr>td:last-of-type{padding:.25em 0}
.thumb,.th{line-height:0;display:inline-block;border:solid 4px #fff;-webkit-box-shadow:0 0 0 1px #ddd;box-shadow:0 0 0 1px #ddd}
.imageblock.left,.imageblock[style*="float: left"]{margin:.25em .625em 1.25em 0}
.imageblock.right,.imageblock[style*="float: right"]{margin:.25em 0 1.25em .625em}
.imageblock>.title{margin-bottom:0}
.imageblock.thumb,.imageblock.th{border-width:6px}
.imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em}
.image.left,.image.right{margin-top:.25em;margin-bottom:.25em;display:inline-block;line-height:0}
.image.left{margin-right:.625em}
.image.right{margin-left:.625em}
a.image{text-decoration:none;display:inline-block}
a.image object{pointer-events:none}
sup.footnote,sup.footnoteref{font-size:.875em;position:static;vertical-align:super}
sup.footnote a,sup.footnoteref a{text-decoration:none}
sup.footnote a:active,sup.footnoteref a:active{text-decoration:underline}
#footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em}
#footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em 0;border-width:1px 0 0 0}
#footnotes .footnote{padding:0 .375em 0 .225em;line-height:1.3334;font-size:.875em;margin-left:1.2em;text-indent:-1.05em;margin-bottom:.2em}
#footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none}
#footnotes .footnote:last-of-type{margin-bottom:0}
#content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0}
.gist .file-data>table{border:0;background:#fff;width:100%;margin-bottom:0}
.gist .file-data>table td.line-data{width:99%}
div.unbreakable{page-break-inside:avoid}
.big{font-size:larger}
.small{font-size:smaller}
.underline{text-decoration:underline}
.overline{text-decoration:overline}
.line-through{text-decoration:line-through}
.aqua{color:#00bfbf}
.aqua-background{background-color:#00fafa}
.black{color:#000}
.black-background{background-color:#000}
.blue{color:#0000bf}
.blue-background{background-color:#0000fa}
.fuchsia{color:#bf00bf}
.fuchsia-background{background-color:#fa00fa}
.gray{color:#606060}
.gray-background{background-color:#7d7d7d}
.green{color:#006000}
.green-background{background-color:#007d00}
.lime{color:#00bf00}
.lime-background{background-color:#00fa00}
.maroon{color:#600000}
.maroon-background{background-color:#7d0000}
.navy{color:#000060}
.navy-background{background-color:#00007d}
.olive{color:#606000}
.olive-background{background-color:#7d7d00}
.purple{color:#600060}
.purple-background{background-color:#7d007d}
.red{color:#bf0000}
.red-background{background-color:#fa0000}
.silver{color:#909090}
.silver-background{background-color:#bcbcbc}
.teal{color:#006060}
.teal-background{background-color:#007d7d}
.white{color:#bfbfbf}
.white-background{background-color:#fafafa}
.yellow{color:#bfbf00}
.yellow-background{background-color:#fafa00}
span.icon>.fa{cursor:default}
.admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default}
.admonitionblock td.icon .icon-note:before{content:"\f05a";color:#19407c}
.admonitionblock td.icon .icon-tip:before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111}
.admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900}
.admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400}
.admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000}
.conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold}
.conum[data-value] *{color:#fff!important}
.conum[data-value]+b{display:none}
.conum[data-value]:after{content:attr(data-value)}
pre .conum[data-value]{position:relative;top:-.125em}
b.conum *{color:inherit!important}
.conum:not([data-value]):empty{display:none}
dt,th.tableblock,td.content,div.footnote{text-rendering:optimizeLegibility}
h1,h2,p,td.content,span.alt{letter-spacing:-.01em}
p strong,td.content strong,div.footnote strong{letter-spacing:-.005em}
p,blockquote,dt,td.content,span.alt{font-size:1.0625rem}
p{margin-bottom:1.25rem}
.sidebarblock p,.sidebarblock dt,.sidebarblock td.content,p.tableblock{font-size:1em}
.exampleblock>.content{background-color:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc}
.print-only{display:none!important}
@media print{@page{margin:1.25cm .75cm}
*{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important}
a{color:inherit!important;text-decoration:underline!important}
a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important}
a[href^="http:"]:not(.bare):after,a[href^="https:"]:not(.bare):after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em}
abbr[title]:after{content:" (" attr(title) ")"}
pre,blockquote,tr,img,object,svg{page-break-inside:avoid}
thead{display:table-header-group}
svg{max-width:100%}
p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3}
h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid}
#toc,.sidebarblock,.exampleblock>.content{background:none!important}
#toc{border-bottom:1px solid #ddddd8!important;padding-bottom:0!important}
.sect1{padding-bottom:0!important}
.sect1+.sect1{border:0!important}
#header>h1:first-child{margin-top:1.25rem}
body.book #header{text-align:center}
body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em 0}
body.book #header .details{border:0!important;display:block;padding:0!important}
body.book #header .details span:first-child{margin-left:0!important}
body.book #header .details br{display:block}
body.book #header .details br+span:before{content:none!important}
body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important}
body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always}
.listingblock code[data-lang]:before{display:block}
#footer{background:none!important;padding:0 .9375em}
#footer-text{color:rgba(0,0,0,.6)!important;font-size:.9em}
.hide-on-print{display:none!important}
.print-only{display:block!important}
.hide-for-print{display:none!important}
.show-for-print{display:inherit!important}}
</style>
</head>
<body class="manpage">
<div id="header">
<h1>git-revert(1) Manual Page</h1>
<h2>NAME</h2>
<div class="sectionbody">
<p>git-revert - Revert some existing commits</p>
</div>
</div>
<div id="content">
<div class="sect1">
<h2 id="_synopsis">SYNOPSIS</h2>
<div class="sectionbody">
<div class="verseblock">
<pre class="content"><em>git revert</em> [--[no-]edit] [-n] [-m parent-number] [-s] [-S[<keyid>]] <commit>…​
<em>git revert</em> --continue
<em>git revert</em> --quit
<em>git revert</em> --abort</pre>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_description">DESCRIPTION</h2>
<div class="sectionbody">
<div class="paragraph">
<p>Given one or more existing commits, revert the changes that the
related patches introduce, and record some new commits that record
them. This requires your working tree to be clean (no modifications
from the HEAD commit).</p>
</div>
<div class="paragraph">
<p>Note: <em>git revert</em> is used to record some new commits to reverse the
effect of some earlier commits (often only a faulty one). If you want to
throw away all uncommitted changes in your working directory, you
should see <a href="git-reset.html">git-reset</a>(1), particularly the <em>--hard</em> option. If
you want to extract specific files as they were in another commit, you
should see <a href="git-checkout.html">git-checkout</a>(1), specifically the <code>git checkout
<commit> -- <filename></code> syntax. Take care with these alternatives as
both will discard uncommitted changes in your working directory.</p>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_options">OPTIONS</h2>
<div class="sectionbody">
<div class="dlist">
<dl>
<dt class="hdlist1"><commit>…​</dt>
<dd>
<p>Commits to revert.
For a more complete list of ways to spell commit names, see
<a href="gitrevisions.html">gitrevisions</a>(7).
Sets of commits can also be given but no traversal is done by
default, see <a href="git-rev-list.html">git-rev-list</a>(1) and its <em>--no-walk</em>
option.</p>
</dd>
<dt class="hdlist1">-e</dt>
<dt class="hdlist1">--edit</dt>
<dd>
<p>With this option, <em>git revert</em> will let you edit the commit
message prior to committing the revert. This is the default if
you run the command from a terminal.</p>
</dd>
<dt class="hdlist1">-m parent-number</dt>
<dt class="hdlist1">--mainline parent-number</dt>
<dd>
<p>Usually you cannot revert a merge because you do not know which
side of the merge should be considered the mainline. This
option specifies the parent number (starting from 1) of
the mainline and allows revert to reverse the change
relative to the specified parent.</p>
<div class="paragraph">
<p>Reverting a merge commit declares that you will never want the tree changes
brought in by the merge. As a result, later merges will only bring in tree
changes introduced by commits that are not ancestors of the previously
reverted merge. This may or may not be what you want.</p>
</div>
<div class="paragraph">
<p>See the <a href="howto/revert-a-faulty-merge.html">revert-a-faulty-merge How-To</a> for
more details.</p>
</div>
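<div class="paragraph">
<p>For example, <code>git revert -m 1 &lt;merge-commit&gt;</code> would revert the
named merge commit relative to its first parent, which is typically the branch
the merge was made into.</p>
</div>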
</dd>
<dt class="hdlist1">--no-edit</dt>
<dd>
<p>With this option, <em>git revert</em> will not start the commit
message editor.</p>
</dd>
<dt class="hdlist1">-n</dt>
<dt class="hdlist1">--no-commit</dt>
<dd>
<p>Usually the command automatically creates some commits with
commit log messages stating which commits were
reverted. This flag applies the changes necessary
to revert the named commits to your working tree
and the index, but does not make the commits. In addition,
when this option is used, your index does not have to match
the HEAD commit. The revert is done against the
beginning state of your index.</p>
<div class="paragraph">
<p>This is useful when reverting the effect of more than one commit
to your index in a row.</p>
</div>
</dd>
<dt class="hdlist1">-S[<keyid>]</dt>
<dt class="hdlist1">--gpg-sign[=<keyid>]</dt>
<dd>
<p>GPG-sign commits. The <code>keyid</code> argument is optional and
defaults to the committer identity; if specified, it must be
stuck to the option without a space.</p>
</dd>
<dt class="hdlist1">-s</dt>
<dt class="hdlist1">--signoff</dt>
<dd>
<p>Add Signed-off-by line at the end of the commit message.
See the signoff option in <a href="git-commit.html">git-commit</a>(1) for more information.</p>
</dd>
<dt class="hdlist1">--strategy=<strategy></dt>
<dd>
<p>Use the given merge strategy. Should only be used once.
See the MERGE STRATEGIES section in <a href="git-merge.html">git-merge</a>(1)
for details.</p>
</dd>
<dt class="hdlist1">-X<option></dt>
<dt class="hdlist1">--strategy-option=<option></dt>
<dd>
<p>Pass the merge strategy-specific option through to the
merge strategy. See <a href="git-merge.html">git-merge</a>(1) for details.</p>
</dd>
</dl>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_sequencer_subcommands">SEQUENCER SUBCOMMANDS</h2>
<div class="sectionbody">
<div class="dlist">
<dl>
<dt class="hdlist1">--continue</dt>
<dd>
<p>Continue the operation in progress using the information in
<em>.git/sequencer</em>. Can be used to continue after resolving
conflicts in a failed cherry-pick or revert.</p>
</dd>
<dt class="hdlist1">--quit</dt>
<dd>
<p>Forget about the current operation in progress. Can be used
to clear the sequencer state after a failed cherry-pick or
revert.</p>
</dd>
<dt class="hdlist1">--abort</dt>
<dd>
<p>Cancel the operation and return to the pre-sequence state.</p>
</dd>
</dl>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_examples">EXAMPLES</h2>
<div class="sectionbody">
<div class="dlist">
<dl>
<dt class="hdlist1"><code>git revert HEAD~3</code></dt>
<dd>
<p>Revert the changes specified by the fourth last commit in HEAD
and create a new commit with the reverted changes.</p>
</dd>
<dt class="hdlist1"><code>git revert -n master~5..master~2</code></dt>
<dd>
<p>Revert the changes done by commits from the fifth last commit
in master (included) to the third last commit in master
(included), but do not create any commit with the reverted
changes. The revert only modifies the working tree and the
index.</p>
</dd>
</dl>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_see_also">SEE ALSO</h2>
<div class="sectionbody">
<div class="paragraph">
<p><a href="git-cherry-pick.html">git-cherry-pick</a>(1)</p>
</div>
</div>
</div>
<div class="sect1">
<h2 id="_git">GIT</h2>
<div class="sectionbody">
<div class="paragraph">
<p>Part of the <a href="git.html">git</a>(1) suite</p>
</div>
</div>
</div>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2016-02-06 16:54:45 W. Europe Standard Time
</div>
</div>
</body>
</html> | gpl-2.0 |
jimm/chuck | src/chuck_def.h | 5914 | /*----------------------------------------------------------------------------
ChucK Concurrent, On-the-fly Audio Programming Language
Compiler and Virtual Machine
Copyright (c) 2004 Ge Wang and Perry R. Cook. All rights reserved.
http://chuck.cs.princeton.edu/
http://soundlab.cs.princeton.edu/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
U.S.A.
-----------------------------------------------------------------------------*/
//-----------------------------------------------------------------------------
// file: chuck_def.h
// desc: ...
//
// author: Ge Wang ([email protected])
// Perry R. Cook ([email protected])
// date: Autumn 2002
//-----------------------------------------------------------------------------
#ifndef __CHUCK_DEF_H__
#define __CHUCK_DEF_H__
#include <stdlib.h>
#include <memory.h>
#include <assert.h>
// types
#define t_CKTIME double
#define t_CKDUR double
#define t_CKFLOAT double
#define t_CKDOUBLE double
#define t_CKSINGLE float
#define t_CKINT long
#define t_CKDWORD unsigned long
#define t_CKUINT t_CKDWORD
#define t_CKBOOL t_CKDWORD
#define t_CKBYTE unsigned char
#define t_CKVOID void
#define t_CKVOIDPTR void *
// complex type
typedef struct { t_CKFLOAT re ; t_CKFLOAT im ; } t_CKCOMPLEX;
// polar type
typedef struct { t_CKFLOAT modulus ; t_CKFLOAT phase ; } t_CKPOLAR;
// size
#define sz_TIME sizeof(double)
#define sz_DUR sizeof(double)
#define sz_FLOAT sizeof(double)
#define sz_DOUBLE sizeof(double)
#define sz_SINGLE sizeof(float)
#define sz_INT sizeof(long)
#define sz_DWORD sizeof(unsigned long)
#define sz_UINT sizeof(t_CKDWORD)
#define sz_BOOL sizeof(t_CKDWORD)
#define sz_BYTE sizeof(unsigned char)
#define sz_VOID 0
#define sz_VOIDPTR sizeof(void *)
#define sz_COMPLEX sizeof(t_CKCOMPLEX)
#define sz_POLAR sizeof(t_CKPOLAR)
typedef char * c_str;
typedef const char * c_constr;
// double
// #define CK_S_DOUBLE
// sample
#ifdef CK_S_DOUBLE
#define SAMPLE double
#define SILENCE 0.0
#define CK_DDN CK_DDN_DOUBLE
#else
#define SAMPLE float
#define SILENCE 0.0f
#define CK_DDN CK_DDN_SINGLE
#endif
// sample complex
typedef struct { SAMPLE re ; SAMPLE im ; } t_CKCOMPLEX_SAMPLE;
// bool
#ifndef TRUE
#define TRUE 1
#define FALSE 0
#endif
// 3.1415926535897932384626433832795028841971693993751058209749445...
#define ONE_PI (3.14159265358979323846)
#define TWO_PI (2.0 * ONE_PI)
#define SQRT2 (1.41421356237309504880)
#ifndef SAFE_DELETE
#define SAFE_DELETE(x) do { if(x){ delete x; x = NULL; } } while(0)
#define SAFE_DELETE_ARRAY(x) do { if(x){ delete [] x; x = NULL; } } while(0)
#define SAFE_RELEASE(x) do { if(x){ x->release(); x = NULL; } } while(0)
#define SAFE_ADD_REF(x) do { if(x){ x->add_ref(); } } while(0)
#define SAFE_REF_ASSIGN(lhs,rhs) do { SAFE_RELEASE(lhs); (lhs) = (rhs); SAFE_ADD_REF(lhs); } while(0)
#endif
// max + min
#define ck_max(x,y) ( (x) >= (y) ? (x) : (y) )
#define ck_min(x,y) ( (x) <= (y) ? (x) : (y) )
// dedenormal
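// (editorial note: the CK_DDN macros below flush very small (or absurdly
// large) sample values to exactly zero, preventing denormalized floats from
// accumulating in feedback paths and stalling the FPU on some platforms)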
#define CK_DDN_SINGLE(f) f = ( f >= 0 ? \
( ( f > (t_CKSINGLE)1e-15 && f < (t_CKSINGLE)1e15 ) ? f : (t_CKSINGLE)0.0 ) : \
( ( f < (t_CKSINGLE)-1e-15 && f > (t_CKSINGLE)-1e15 ) ? f : (t_CKSINGLE)0.0 ) )
#define CK_DDN_DOUBLE(f) f = ( f >= 0 ? \
( ( f > (t_CKDOUBLE)1e-15 && f < (t_CKDOUBLE)1e15 ) ? f : 0.0 ) : \
( ( f < (t_CKDOUBLE)-1e-15 && f > (t_CKDOUBLE)-1e15 ) ? f : 0.0 ) )
// tracking
#if defined(__CHUCK_STAT_TRACK__)
#define CK_TRACK( stmt ) stmt
#else
#define CK_TRACK( stmt )
#endif
#ifdef __MACOSX_CORE__
#define __PLATFORM_MACOSX__
#endif
#if defined(__LINUX_ALSA__) || defined(__LINUX_JACK__) || defined(__LINUX_OSS__)
#define __PLATFORM_LINUX__
#endif
#ifdef __PLATFORM_WIN32__
#ifndef usleep
#define usleep(x) Sleep( (x / 1000 <= 0 ? 1 : x / 1000) )
#endif
#pragma warning (disable : 4996) // stdio deprecation
#pragma warning (disable : 4786) // stl debug info
#pragma warning (disable : 4312) // type casts from void*
#pragma warning (disable : 4311) // type casts to void*
#pragma warning (disable : 4244) // truncation
#pragma warning (disable : 4068) // unknown pragma
#endif
#ifdef __CHIP_MODE__
#define __DISABLE_MIDI__
#define __DISABLE_SNDBUF__
#define __DISABLE_WATCHDOG__
#define __DISABLE_RAW__
#define __DISABLE_KBHIT__
#define __DISABLE_PROMPTER__
#define __DISABLE_RTAUDIO__
#define __DISABLE_OTF_SERVER__
#define __ALTER_HID__
#define __ALTER_ENTRY_POINT__
#define __STK_USE_SINGLE_PRECISION__
#endif
#endif
| gpl-2.0 |
olesalscheider/ktp-call-ui | libktpcall/private/tf-channel-handler.h | 2462 | /*
Copyright (C) 2011 Collabora Ltd. <[email protected]>
Copyright (C) 2012 George Kiagiadakis <[email protected]>
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TF_CHANNEL_HANDLER_H
#define TF_CHANNEL_HANDLER_H
#include "tf-content-handler-factory.h"
#include <QList>
#include <QHash>
#include <QGst/Pipeline>
namespace KTpCallPrivate {
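/* Editorial note: this handler pairs a Tp::CallChannel with its
 * TelepathyFarstream counterpart (QTf::Channel), owns the GStreamer pipeline,
 * and creates a TfContentHandler (via the supplied factory) for every
 * audio/video content that appears on the call, forwarding additions and
 * removals through the signals below. */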
class TfChannelHandler : public QObject
{
Q_OBJECT
public:
explicit TfChannelHandler(const Tp::CallChannelPtr & channel,
TfContentHandlerFactory::Constructor factoryCtor,
QObject *parent = 0);
virtual ~TfChannelHandler();
Tp::CallChannelPtr callChannel() const { return m_callChannel; }
QTf::ChannelPtr tfChannel() const { return m_tfChannel; }
QGst::PipelinePtr pipeline() const { return m_pipeline; }
void shutdown();
Q_SIGNALS:
void channelClosed();
void contentAdded(KTpCallPrivate::TfContentHandler*);
void contentRemoved(KTpCallPrivate::TfContentHandler*);
private Q_SLOTS:
void init();
void onPendingTfChannelFinished(Tp::PendingOperation *op);
void onCallChannelInvalidated();
private:
void onTfChannelClosed();
void onContentAdded(const QTf::ContentPtr & tfContent);
void onContentRemoved(const QTf::ContentPtr & tfContent);
void onFsConferenceAdded(const QGst::ElementPtr & conference);
void onFsConferenceRemoved(const QGst::ElementPtr & conference);
void onBusMessage(const QGst::MessagePtr & message);
private:
Tp::CallChannelPtr m_callChannel;
QTf::ChannelPtr m_tfChannel;
QGst::PipelinePtr m_pipeline;
TfContentHandlerFactory *m_factory;
uint m_channelClosedCounter;
QList<QGlib::ObjectPtr> m_fsElementAddedNotifiers;
QHash<QTf::ContentPtr, TfContentHandler*> m_contents;
};
} // KTpCallPrivate
#endif
| gpl-2.0 |
ottok/mariadb-galera-10.0 | storage/perfschema/table_esgs_by_user_by_event_name.cc | 5462 | /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/**
@file storage/perfschema/table_esgs_by_user_by_event_name.cc
Table EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME (implementation).
*/
#include "my_global.h"
#include "my_pthread.h"
#include "pfs_instr_class.h"
#include "pfs_column_types.h"
#include "pfs_column_values.h"
#include "table_esgs_by_user_by_event_name.h"
#include "pfs_global.h"
#include "pfs_account.h"
#include "pfs_visitor.h"
THR_LOCK table_esgs_by_user_by_event_name::m_table_lock;
static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("USER") },
{ C_STRING_WITH_LEN("char(16)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("EVENT_NAME") },
{ C_STRING_WITH_LEN("varchar(128)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("COUNT_STAR") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("SUM_TIMER_WAIT") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("MIN_TIMER_WAIT") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("AVG_TIMER_WAIT") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
{ C_STRING_WITH_LEN("MAX_TIMER_WAIT") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
}
};
TABLE_FIELD_DEF
table_esgs_by_user_by_event_name::m_field_def=
{ 7, field_types, 0, (uint*) 0 };
PFS_engine_table_share
table_esgs_by_user_by_event_name::m_share=
{
{ C_STRING_WITH_LEN("events_stages_summary_by_user_by_event_name") },
&pfs_truncatable_acl,
table_esgs_by_user_by_event_name::create,
NULL, /* write_row */
table_esgs_by_user_by_event_name::delete_all_rows,
NULL, /* get_row_count */
1000, /* records */
sizeof(pos_esgs_by_user_by_event_name),
&m_table_lock,
&m_field_def,
false /* checked */
};
PFS_engine_table*
table_esgs_by_user_by_event_name::create(void)
{
return new table_esgs_by_user_by_event_name();
}
int
table_esgs_by_user_by_event_name::delete_all_rows(void)
{
reset_events_stages_by_thread();
reset_events_stages_by_account();
reset_events_stages_by_user();
return 0;
}
table_esgs_by_user_by_event_name::table_esgs_by_user_by_event_name()
: PFS_engine_table(&m_share, &m_pos),
m_row_exists(false), m_pos(), m_next_pos()
{}
void table_esgs_by_user_by_event_name::reset_position(void)
{
m_pos.reset();
m_next_pos.reset();
}
int table_esgs_by_user_by_event_name::rnd_init(bool scan)
{
m_normalizer= time_normalizer::get(stage_timer);
return 0;
}
int table_esgs_by_user_by_event_name::rnd_next(void)
{
PFS_user *user;
PFS_stage_class *stage_class;
for (m_pos.set_at(&m_next_pos);
m_pos.has_more_user();
m_pos.next_user())
{
user= &user_array[m_pos.m_index_1];
if (user->m_lock.is_populated())
{
stage_class= find_stage_class(m_pos.m_index_2);
if (stage_class)
{
make_row(user, stage_class);
m_next_pos.set_after(&m_pos);
return 0;
}
}
}
return HA_ERR_END_OF_FILE;
}
int
table_esgs_by_user_by_event_name::rnd_pos(const void *pos)
{
PFS_user *user;
PFS_stage_class *stage_class;
set_position(pos);
DBUG_ASSERT(m_pos.m_index_1 < user_max);
user= &user_array[m_pos.m_index_1];
if (! user->m_lock.is_populated())
return HA_ERR_RECORD_DELETED;
stage_class= find_stage_class(m_pos.m_index_2);
if (stage_class)
{
make_row(user, stage_class);
return 0;
}
return HA_ERR_RECORD_DELETED;
}
void table_esgs_by_user_by_event_name
::make_row(PFS_user *user, PFS_stage_class *klass)
{
pfs_lock lock;
m_row_exists= false;
user->m_lock.begin_optimistic_lock(&lock);
if (m_row.m_user.make_row(user))
return;
m_row.m_event_name.make_row(klass);
PFS_connection_stage_visitor visitor(klass);
PFS_connection_iterator::visit_user(user, true, true, & visitor);
if (! user->m_lock.end_optimistic_lock(&lock))
return;
m_row_exists= true;
m_row.m_stat.set(m_normalizer, & visitor.m_stat);
}
int table_esgs_by_user_by_event_name
::read_row_values(TABLE *table, unsigned char *buf, Field **fields,
bool read_all)
{
Field *f;
if (unlikely(! m_row_exists))
return HA_ERR_RECORD_DELETED;
/* Set the null bits */
DBUG_ASSERT(table->s->null_bytes == 1);
buf[0]= 0;
for (; (f= *fields) ; fields++)
{
if (read_all || bitmap_is_set(table->read_set, f->field_index))
{
switch(f->field_index)
{
case 0: /* USER */
m_row.m_user.set_field(f);
break;
case 1: /* EVENT_NAME */
m_row.m_event_name.set_field(f);
break;
default: /* 2, ... COUNT/SUM/MIN/AVG/MAX */
m_row.m_stat.set_field(f->field_index - 2, f);
break;
}
}
}
return 0;
}
| gpl-2.0 |
igorwwwwwwwwwwwwwwwwwwww/test | includes/Xms/Core/CommonXml.php | 49814 | <?php namespace Xms\Core;
use ErrorException;
use DOMNode;
use DOMNodeList;
use DOMElement;
use DOMDocument;
use Closure;
/*
* XMS - Online Web Development
*
* Copyright (c) 2010 Cezar Lucan [email protected]
* Licensed under GPL license.
* http://www.aws-dms.com
*
* Date: 2010-10-24
*/
//0.47 ::append,::prepend,::replaceWith,::before,::after - removed support for ->check()
//0.46 $GLOBALS["XMS_SERVER_CONSOLE"] now observable instance property Xms::XMS_SERVER_CONSOLE
//0.45 ::has fixed to return array when $commit = false
//0.40 ::context moved here; ::setContext() and ::getContext()
//0.40 ::siblings
//0.37 ::elements(), __invoke direct call to return $obj->q()
//0.37: removed elements operations like ::bind(),.. from versions 0.34-36; use ::elements() instead
//0.36 ::detachAllEventHandlers(), ::detachEventHandler()
//0.35 ::trigger(),::on()
//0.34 ::bind(),::unbind(),::unbindAll() to support same XmsDomElement methods
//0.31 updated comments - minimal changes
//0.30 ::filter() , ::all(), ::find() to accept additional parameters to invoke the callback
//0.29 ::prop($name,$value) to change properties of DOMNodes; only works on pi() and text() like ::prop("data","SOME DATA")
//0.27 2015-03-14: properly commented
//0.24 2015-03-14: ::find($selector,$callback[,$optionalArg1,$optionalArg2,])
//0.20 2015-03-12: ::filter() , ::all() , ::add(), ::has(selector) and ::has(selector,commit), ::lastQuery
//0.20 2015-03-12: old version of ::filter() removed, ::each() simplified
//0.20 2015-02-28: ::addClass(), ::removeClass(), ::hasClass(), ::has()
//0.18 2015-02-27 ::remove(), ::cloneResults()
//0.16 2015-02-27 ::append(),::prepend(),::before(),::after(),::replace(),::replaceContent() arguments parsed with normalizeOperationsInput: e.g. append(string) or append(DOMNode) or append(DOMNodeList) or append(array(string,domNode,DOMNodeList,array))
//0.14 2015-02-26 ::each() - if callback is closure it passes first argument as $this of the callback
//0.14 2015-02-26 trows exception on ::q() and ::e() malformed xpath or context
//0.14 2015-02-25 extends XmsOverload
//0.13 2015-1-30: replaceContent fixed not append if document fragment is empty
//0.12 ::removeAttributes()
//0.12 removed copyElement()
//0.12 returns attr value of first item of results if only attr name specified
//0.12 removed cssq() and csse()
//0.11-2013-06-01: fixed call time pass by reference for append, prepend lambda functions
//0.10-2010-11-03: dirty output fixes
//0.9: ::filter(xpath) - query on a clone of $this;
//0.9: check , filter(function) - works, filter(xpath) - works
//0.8: ::each() - now support multiple arguments; these will be transmitted to the function; first arg will be always $el
/*
* Abstract class providing extended xml functionality
*/
abstract class CommonXml extends XmsOverload
{
const version = "0.46";
const releaseDate = "2015-05-03";
public $doc;
public $results = array();
public $xpath;
public $lastQuery;
protected $context;
private $check;
/**
* executes xpath query over the xml document
*
* @param string $query
* @param DOMNode $context - optional - the context node if needed
* @return CommonXml
*/
final public function q($query = ".", $context = FALSE)
{
$this->lastQuery = $query;
if (!$context)
$this->results = $this->normalizeOperationsInput($this->xpath->query($query, $this->context));
else
$this->results = $this->normalizeOperationsInput($this->xpath->query($query, $context));
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " executing xpath query\t" . $query . "\n\t" . sizeof($this->results) . " results \n";
if ($this->results === FALSE) {
$this->results = array();
throw new ErrorException("\n Expression is malformed or the contextnode is invalid in " . __FUNCTION__ . " of " . get_class($this) . "\n");
}
return $this;
}
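/*
 * Illustrative usage sketch, not part of the original source: assuming $xml is an
 * instance of a concrete CommonXml subclass with a loaded document, a query plus a
 * chained call might look like
 *   $xml->q("//item[@id='42']")->attr("class", "selected");
 * $xml, the selector and the attribute values are hypothetical.
 */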
/**
* evaluates xpath query over the xml document
*
* @param string $query
* @param DOMNode $context - optional - the context node if needed
* @return CommonXml
*/
final public function e($query = ".", $context = FALSE)
{
$this->lastQuery = $query;
if (!$context)
$this->results = $this->normalizeOperationsInput($this->xpath->evaluate($query, $this->context));
else
$this->results = $this->normalizeOperationsInput($this->xpath->evaluate($query, $context));
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " executing xpath query\t" . $query . "\n\t" . sizeof($this->results) . " results \n";
if ($this->results === FALSE) {
$this->results = array();
throw new ErrorException("\n Expression is malformed or the contextnode is invalid in " . __FUNCTION__ . " of " . get_class($this) . "\n");
}
return $this;
}
/**
* if invoked directly after instance created, execute q method with given parameters
*
* @param DOMNode $context
* @return CommonXml
*/
function __invoke()
{
return call_user_func_array(array(
$this,
"q"
), func_get_args());
}
function __clone()
{
$this->results = array();
$this->check = null;
$this->context = null;
}
/**
* sets the default query context
*
* @param DOMNode $context
* @return CommonXml
*/
final public function setContext(&$context)
{
$this->context = $context;
return $this;
}
/**
* retrieve the default query context
*
* @return DOMNode
*/
final public function getContext()
{
return $this->context;
}
/**
* executes a callback for each result
* if $callback is closure $this is the current DOMElement
*
* @param callback $callback
* @param mixed optional $params - extra parameters to pass to the $callback
* @return CommonXml
*/
final public function each($callback)
{
//TODO: replace the foreach with a while loop
if (func_num_args() > 0) {
$params = func_get_args();
if (is_callable($callback)) {
if (sizeof($this->results) > 0)
foreach ($this->results as $result) {
//the & below is important, otherwise we get a warning;
$params[0] = &$result;
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " executing callback " . ($params[0] instanceof DOMNode ? "on " . $params[0]->nodeName : "") . "\n";
//if it is a closure
if (Utils::is_closure($callback)) {
//if($callback instanceof Closure)
//and parameter 0 (aka $el) is defined
if (gettype($params[0]) == "object" && $params[0] instanceof DOMNode) {
//call the function with $this = $el
$callback = $callback->bindTo($params[0]);
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " binding callback to " . $params[0]->nodeName . "\n";
}
} else {
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " NOT a closure\n";
}
$fres = call_user_func_array($callback, $params);
if ($fres === FALSE)
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " was NOT ABLE TO EXECUTE given callback\n";
}
} else {
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " ERROR executing callback: NONE CALLABLE\n";
}
} else {
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " NO callback supplied\n";
}
return $this;
}
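/*
 * Illustrative usage sketch, not part of the original source: the callback receives the
 * current node as its first argument (closures are also bound to it) and any extra arguments after it, e.g.
 *   $xml->q("//a")->each(function (&$el, $prefix) { $el->setAttribute("title", $prefix . $el->nodeValue); }, "Link: ");
 * $xml, the selector "//a" and the attribute values are hypothetical.
 */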
/**
* executes a callback with $this->results as its first parameter
* if $callback is a closure, $this is the CommonXml object the method belongs to
*
* @param callback $callback
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function all($callback)
{
$params = func_get_args();
if (is_callable($callback)) {
if (Utils::is_closure($callback))
$callback = $callback->bindTo($this);
$params[0] = &$this->results;
$checkForResults = call_user_func_array($callback, $params);
if ($checkForResults === FALSE)
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " callback could not be executed\n";
}
return $this;
}
/**
* executes $toCheck callback before executing code in below methods; $toCheck needs to return TRUE for method to execute
* used in methods like append, prepend, before, after, attr, removeAttr, replace, replaceContent, text, removeChilds
*
* @param callback $toCheck
* @return CommonXml
*/
final public function check($toCheck)
{
if (is_callable($toCheck))
$this->check = $toCheck;
else
$this->check = FALSE;
return $this;
}
/**
* reduces results to the ones that pass the test of the $callback;
* $callback returns TRUE to keep the element;
* $callback can be either function, lambda or closure
* the current result is passed as the first argument of $callback
* if $callback is a closure, $this is the CommonXml object the filter method belongs to
*
* @param callback $callback ($el,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::filter;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function filter($callback)
{
$params = func_get_args();
if (is_callable($callback)) {
if (Utils::is_closure($callback))
$callback = $callback->bindTo($this);
$theyDo = [];
foreach ($this->results as $result) {
if ($result instanceof DOMNode) {
$params[0] = &$result;
$checkForResults = call_user_func_array($callback, $params);
if ($checkForResults === TRUE)
$theyDo[] = $result;
else {
$this->topmost()->XMS_SERVER_CONSOLE = get_class($this) . "::" . __FUNCTION__ . " either the callback could not be executed or the element did not passed the test\n";
}
}
}
$this->results = $theyDo;
}
return $this;
}
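/*
 * Illustrative usage sketch, not part of the original source: keep only the results
 * carrying a given attribute, e.g.
 *   $xml->q("//img")->filter(function (&$el, $attr) { return $el->hasAttribute($attr); }, "alt");
 * $xml, "//img" and "alt" are hypothetical values.
 */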
/**
* executes the given callback for all nodes given by selector in the context of each Xml::results
* if callback is closure $this is the current DOMElement
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function find($selector, $callback)
{
$params = func_get_args();
if ($selector && is_callable($callback)) {
//only executes below if we have a $selector and a callable $callback
$selector = trim($selector);
//trim whitespace if any
foreach ($this->results as $result) {
//for each result
if ($result instanceof DOMNode) {
//if is DOMNode
$checkForResults = $this->xpath->query($selector, $result);
//find nodes given by $selector in context $result
if ($checkForResults === FALSE)
//if query returns FALSE
throw new ErrorException("\n Expression is malformed or invalid context " . __FUNCTION__ . " of " . get_class($this) . "\n");
else if ($checkForResults instanceof DOMNodeList && $checkForResults->length > 0) {
//if we have nodes in context
$params[1] = &$result;
//second parameter of our callback is the contextNode we run the query to
$checkForResults = $this->normalizeOperationsInput($checkForResults);
foreach ($checkForResults as $subquery_result) {
//for each node in context
$params[0] = &$subquery_result;
if (Utils::is_closure($callback))
$callback = $callback->bindTo($subquery_result);
//if is closure bind it to the Xml
call_user_func_array($callback, $params);
//and execute the callback
}
}
}
}
}
return $this;
}
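/*
 * Illustrative usage sketch, not part of the original source: the callback receives each
 * matched node and the result it was found under, e.g.
 *   $xml->q("//ul")->find("li", function (&$el, $context) { $el->setAttribute("data-parent", $context->getAttribute("id")); });
 * $xml, the selectors and the attribute names are hypothetical.
 */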
/**
* filters the results down to the ones that contain elements matching the given $selector
*
* @param string $selector
* @param bool $commit if TRUE - the nodes matched by the subquery replace the current results; if FALSE - returns an array of the current results that contain a match, leaving CommonXml::results unchanged
* @return CommonXml
*/
final public function has($selector, $commit = FALSE)
{
if ($selector) {
$selector = trim($selector);
$theyDo = [];
foreach ($this->results as $result) {
if ($result instanceof DOMNode) {
$checkForResults = $this->xpath->query($selector, $result);
if ($checkForResults === FALSE)
throw new ErrorException("\n Expression is malformed or invalid context " . __FUNCTION__ . " of " . get_class($this) . "\n");
else if ($checkForResults instanceof DOMNodeList && $checkForResults->length > 0) {
if ($commit) {
foreach ($checkForResults as $subquery_result)
$theyDo[] = $subquery_result;
} else
$theyDo[] = $result;
}
}
}
}
if ($commit) {
$this->results = $theyDo;
return $this;
} else
return $theyDo;
}
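/*
 * Illustrative usage sketch, not part of the original source:
 *   $xml->q("//section")->has("p", true);             // results now hold the matched <p> nodes
 *   $sectionsWithP = $xml->q("//section")->has("p");  // array of the <section> nodes containing a <p>; results unchanged
 * $xml and the selectors are hypothetical.
 */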
/**
* retrieve the $index element of the results
* if $callback is given run it for the $index element of the results and return $this
*
* @param integer $index
* @param callback $callback ($el,$results,$param1,$param2,....){$this is the DOMNode if $callback is closure;$param1,$param2,... are the optional ones}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
* @return DOMNode
*/
final public function get($index, $callback = null)
{
if (is_callable($callback)) {
$params = func_get_args();
if ($callback instanceof Closure)
$callback = $callback->bindTo($this->results[$index]);
$params[0] = &$this->results[$index];
$params[1] = &$this->results;
call_user_func_array($callback, $params);
return $this;
}
else {
if ($this->results[$index] instanceof DOMNode)
return $this->results[$index];
}
}
/**
* find if current node is matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function is($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("self::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "self::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find any descendants or self matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function descendantsAndSelf($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("descendant-or-self::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "descendant-or-self::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find any descendants matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function descendants($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("descendant::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "descendant::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find parent nodes, including self, matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function parentsAndSelf($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("ancestor-or-self::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "ancestor-or-self::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find parent nodes matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function parents($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("ancestor::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "ancestor::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find child nodes matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function children($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("child::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "child::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find following nodes matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function following($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("following::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "following::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find preceding nodes matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function preceding($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("preceding::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "preceding::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find the attribute nodes matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function attribute($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("attribute::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "attribute::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find next siblings matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function next($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("following-sibling::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "following-sibling::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find prev siblings matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function prev($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("preceding-sibling::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "preceding-sibling::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* find siblings matching given selector; if a callback is provided, run it for each result, otherwise commit changes in CommonXml::results
* applies to CommonXml::results and not CommonXml::context
*
* @param string $selector
* @param callback $callback ($el,$context,$param1,$param2,....){$this is the DOMNode;$param1,$param2,... are the optional ones of ::find;}
* @param mixed $param1,$param2,... additional parameters to invoke the callback with;
* @return CommonXml
*/
final public function siblings($selector, $callback = null)
{
if (!is_callable($callback))
$this->has("preceding-sibling::" . $selector . "|" . "following-sibling::" . $selector, TRUE);
else {
$params = func_get_args();
$params[0] = "preceding-sibling::" . $selector . "|" . "following-sibling::" . $selector;
call_user_func_array(array(
$this,
"find"
), $params);
}
return $this;
}
/**
* adds the results of xpath query for selector to existing set of results
*
* @param string $selector
* @return CommonXml
*/
final public function add($selector)
{
if ($selector) {
$selector = trim($selector);
$newResults = $this->xpath->query($selector);
if ($newResults instanceof DOMNodeList && $newResults->length > 0)
foreach ($newResults as $result) {
if ($result instanceof DOMNode)
$this->results[] = $result;
}
}
return $this;
}
/**
* changes a native property of a DOMNode if possible, for each result
*
* @param string $name
* @param string $value
* @return CommonXml
*/
final public function prop($name, $value)
{
if (!empty($name)) {
$name = trim($name);
foreach ($this->results as $result) {
if ($result instanceof DOMNode)
$result->$name = $value;
}
}
return $this;
}
/**
* filters the results to the ones having given class
*
* @param string $c
* @return CommonXml
*/
final public function hasClass($c)
{
if (!empty($c)) {
$c = trim($c);
$theyDo = [];
foreach ($this->results as $result) {
if ($result instanceof DOMNode && method_exists($result, "hasAttribute"))
if ($result->hasAttribute("class")) {
$attrVals = preg_split("/[\s,]+/", $result->getAttribute("class"));
foreach ($attrVals as $class)
if ($class == $c)
$theyDo[] = $result;
}
}
$this->results = $theyDo;
}
return $this;
}
/**
* removes given class
*
* @param string $c
* @param string $glue - default space character; used to implode the class names
* @return CommonXml
*/
final public function removeClass($c, $glue = " ")
{
if (!$this->removeClass_helper instanceof Closure)
$this->removeClass_helper = function(&$el, $class, $g) {
$cs = array();
if ($this instanceof DOMNode && method_exists($this, "hasAttribute"))
if ($this->hasAttribute("class")) {
$attrVals = preg_split("/[\s,]+/", $this->getAttribute("class"));
foreach ($attrVals as $c)
if ($class != $c)
$cs[] = $c;
$this->setAttribute("class", implode($g, $cs));
}
};
if (!empty($c)) {
$c = trim($c);
$this->each($this->removeClass_helper, $c, $glue);
}
return $this;
}
/**
* adds given class
*
* @param string $c
* @param string $glue - default space character
* @return CommonXml
*/
final public function addClass($c, $glue = " ")
{
if (!$this->addClass_helper instanceof Closure)
$this->addClass_helper = function(&$el, $class, $g) {
if ($this instanceof DOMNode && method_exists($this, "hasAttribute")) {
if ($this->getAttribute("class"))
$attrVals = preg_split("/[\s,]+/", $this->getAttribute("class"));
else
$attrVals = [];
if (!in_array($class, $attrVals)) {
$attrVals[] = $class;
$this->setAttribute("class", implode($g, $attrVals));
}
}
};
if (!empty($c)) {
$c = trim($c);
$this->each($this->addClass_helper, $c, $glue);
}
return $this;
}
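/*
 * Illustrative usage sketch, not part of the original source:
 *   $xml->q("//div[@id='menu']")->addClass("active")->removeClass("hidden");
 * $xml, the selector and the class names are hypothetical.
 */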
/**
* returns class instance for 2 arguments or attr value for one argument (attr name)
*
* @param string $name
* @param string $value
* @return CommonXml if both $name and $value are provided;
* @return string if only $name is provided
*/
final public function attr()
{
$attrName = func_get_arg(0);
$attrValue = func_num_args() > 1 ? func_get_arg(1) : null;
if (!$this->attr_helper instanceof Closure)
$this->attr_helper = function(&$el, $attrName, $attrValue) {
if ($el->nodeType == 1)
$el->setAttribute($attrName, $attrValue);
};
if (func_num_args() == 2) {
$this->each($this->attr_helper, $attrName, $attrValue);
return $this;
} else if (func_num_args() == 1) {
if (gettype($this->get(0)) == "object" && $this->get(0) instanceof DOMNode)
if ($this->get(0)->nodeType == 1)
return $this->get(0)->getAttribute($attrName);
}
}
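/*
 * Illustrative usage sketch, not part of the original source:
 *   $xml->q("//a")->attr("target", "_blank");  // sets the attribute on every result
 *   $href = $xml->q("//a")->attr("href");      // reads the attribute of the first result
 * $xml, the selector and the attribute names are hypothetical.
 */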
/**
* removes given attribute
*
* @param string $name
* @return CommonXml
*/
final public function removeAttr($aN)
{
if (func_num_args() >= 1) {
if (!$removeAttr_helper instanceof Closure)
$removeAttr_helper = function(&$el, $attrName) {
if ($el->nodeType == 1)
if ($el->hasAttribute($attrName))
$el->removeAttribute($attrName);
};
$this->each($removeAttr_helper, $aN);
}
return $this;
}
/**
* removes all attributes
*
* @return CommonXml
*/
final public function removeAttributes()
{
if (!$this->removeAttributes_helper instanceof Closure)
$this->removeAttributes_helper = function(&$el) {
if ($el->nodeType == 1)
while ($el->hasAttributes())
$el->removeAttributeNode($el->attributes->item(0));
};
$this->each($this->removeAttributes_helper, FALSE);
return $this;
}
/**
* creates an array of DOMNodes
*
* @param mixed $input - can be either string, DOMNode, DOMNodeList, array of them
* @return array
*/
final public function normalizeOperationsInput($input)
{
$output = array();
$doc = $this->doc;
if (func_num_args() > 0) {
//use the DOMDocument given as the second parameter of the function, if any
if (func_num_args() > 1)
if (func_get_arg(1) instanceof DOMDocument)
$doc = func_get_arg(1);
switch (gettype($input)) {
case "string" :
//an empty input "" is still of type string
if (!empty($input)) {
$df = $doc->createDocumentFragment();
$df->appendXML($input);
$output[] = $df;
}
break;
case "object" :
//DOMNodeList DOMNode
if ($input instanceof DOMNode) {
$output[] = $input;
} else if ($input instanceof DOMNodeList) {
foreach ($input as $node)
$output[] = $node;
}
break;
case "array" :
foreach ($input as $v)
$output = array_merge($output, $this->normalizeOperationsInput($v));
break;
}
}
return $output;
}
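/*
 * Illustrative sketch, not part of the original source: every accepted input shape ends up
 * as an array of DOMNodes, e.g.
 *   $xml->normalizeOperationsInput("<p>text</p>");                     // array with one DOMDocumentFragment
 *   $xml->normalizeOperationsInput($someNode);                         // array($someNode)
 *   $xml->normalizeOperationsInput(array("<b>x</b>", $someNodeList));  // merged array of nodes
 * $xml, $someNode and $someNodeList are hypothetical variables.
 */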
/**
* clones all nodes in results
*
* @return array
*/
final public function cloneResults()
{
$output = [];
foreach ($this->results as $res)
$output[] = $res->cloneNode(TRUE);
return $output;
}
/**
* appends to all nodes in results
*
* @param mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function append($with)
{
if (func_num_args() >= 1) {
$nodes = $this->normalizeOperationsInput($with);
$append_helper = function(&$el, $df, $cloneNode) {
if ($cloneNode) {
if (method_exists($df, "cloneNode"))
$newdf = $df->cloneNode(TRUE);
else
$newdf = $df;
} else
$newdf = $df;
if ($newdf->ownerDocument !== $el->ownerDocument)
if ($newdf instanceof DOMNode)
$newdf = $el->ownerDocument->importNode($newdf, TRUE);
if ($newdf instanceof DOMNode)
$el->appendChild($newdf);
};
foreach ($nodes as $node)
$this->each($append_helper, $node, (sizeof($this->results) > 1 ? TRUE : FALSE));
}
return $this;
}
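/*
 * Illustrative usage sketch, not part of the original source: $with may be an XML string,
 * a DOMNode, a DOMNodeList or an array of these, e.g.
 *   $xml->q("//ul[@id='list']")->append("<li>new entry</li>");
 * $xml, the selector and the markup are hypothetical.
 */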
/**
* prepends to all nodes in results
*
* @param mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function prepend($with)
{
if (func_num_args() >= 1) {
$nodes = $this->normalizeOperationsInput($with);
if (!$prepend_helper instanceof Closure)
$prepend_helper = function(&$el, $df, $cloneNode) {
if ($cloneNode) {
if (method_exists($df, "cloneNode"))
$newdf = $df->cloneNode(TRUE);
else
$newdf = $df;
} else
$newdf = $df;
if ($newdf->ownerDocument !== $el->ownerDocument)
if ($newdf instanceof DOMNode)
$newdf = $el->ownerDocument->importNode($newdf, TRUE);
if ($el->hasChildNodes()) {
if ($newdf instanceof DOMNode)
$el->insertBefore($newdf, $el->firstChild);
} else {
if ($newdf instanceof DOMNode)
$el->appendChild($newdf);
}
};
foreach ($nodes as $node)
$this->each($prepend_helper, $node, (sizeof($this->results) > 1 ? TRUE : FALSE));
}
return $this;
}
/**
* inserts before all nodes in results
*
* @param $with - mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function before($with)
{
if (func_num_args() >= 1) {
$nodes = $this->normalizeOperationsInput($with);
if (!$before_helper instanceof Closure)
$before_helper = function(&$el, $df, $cloneNode) {
if ($cloneNode) {
if (method_exists($df, "cloneNode"))
$newdf = $df->cloneNode(TRUE);
else
$newdf = $df;
} else
$newdf = $df;
if ($newdf->ownerDocument !== $el->ownerDocument)
if ($newdf instanceof DOMNode)
$newdf = $el->ownerDocument->importNode($newdf, TRUE);
if ($newdf instanceof DOMNode)
$el->parentNode->insertBefore($newdf, $el);
};
foreach ($nodes as $node)
$this->each($before_helper, $node, (sizeof($this->results) > 1 ? TRUE : FALSE));
}
return $this;
}
/**
* inserts after all nodes in results
*
* @param $with - mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function after($with)
{
if (func_num_args() >= 1) {
$nodes = $this->normalizeOperationsInput($with);
if (!$after_helper instanceof Closure)
$after_helper = function(&$el, $df, $cloneNode) {
if ($cloneNode) {
if (method_exists($df, "cloneNode"))
$newdf = $df->cloneNode(TRUE);
else
$newdf = $df;
} else
$newdf = $df;
if ($newdf->ownerDocument !== $el->ownerDocument)
if ($newdf instanceof DOMNode)
$newdf = $el->ownerDocument->importNode($newdf, TRUE);
if ($newdf instanceof DOMNode)
$el->parentNode->insertBefore($newdf, $el->nextSibling);
};
foreach ($nodes as $node)
$this->each($after_helper, $node, (sizeof($this->results) > 1 ? TRUE : FALSE));
}
return $this;
}
/**
* replaces all nodes in results
*
* @param mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function replace()
{
if (func_num_args() == 1) {
$df = $this->doc->createDocumentFragment();
$nodes = $this->normalizeOperationsInput(func_get_arg(0));
//build a document fragment from arg(0); clone and import nodes if needed
if (sizeof($this->results) > 1)
//if there are several elements in results, work on clones
foreach ($nodes as $node)
if ($node->ownerDocument === $this->doc)
$df->appendChild($node->cloneNode(TRUE));
else
$df->appendChild($this->doc->importNode($node->cloneNode(TRUE), TRUE));
else
//we only have a single query result
foreach ($nodes as $node)
if ($node->ownerDocument === $this->doc)
$df->appendChild($node);
else
$df->appendChild($this->doc->importNode($node, TRUE));
//with the fragment built, do the replace: use a clone when there are several results, the original fragment when there is only one
if (sizeof($this->results) > 1)
foreach ($this->results as $tor)
$tor->parentNode->replaceChild($df->cloneNode(TRUE), $tor);
else
foreach ($this->results as $tor)
$tor->parentNode->replaceChild($df, $tor);
}
return $this;
}
/**
* replaces content of all nodes in results
*
* @param $with - mixed - can be either string, DOMNode, DOMNodeList, array of them
* @return CommonXml
*/
final public function replaceContent($with)
{
if (func_num_args() >= 1) {
$nodes = $this->normalizeOperationsInput($with);
if (!$replaceContent_helper_removeContent instanceof Closure)
$replaceContent_helper_removeContent = function(&$el) use ($nodes) {
while ($el->hasChildNodes())
$el->removeChild($el->firstChild);
foreach ($nodes as $node)
$el->appendChild($node->cloneNode(TRUE));
};
$this->each($replaceContent_helper_removeContent);
}
return $this;
}
/**
* removes child nodes of all nodes in results
*
* @return CommonXml
*/
final public function removeChilds()
{
foreach ($this->results as $tor)
if (method_exists($tor, "hasChildNodes"))
while ($tor->hasChildNodes())
$tor->removeChild($tor->childNodes->item(0));
return $this;
}
/**
* removes all nodes in results
*
* @return CommonXml
*/
final public function remove()
{
$this->each(function() {
$this->parentNode->removeChild($this);
});
return $this;
}
/**
* retrieve or set the text content of all nodes in results
*
* @param string to set the text
* @return CommonXml
*/
final public function text()
{
if (func_num_args() == 1) {
$textContent = func_get_arg(0);
if (!$this->text_helper instanceof Closure)
$this->text_helper = function(&$el, $check, $textContent) {
if (!$check || ($check && is_callable($check) && call_user_func($check, $el))) {
$done = false;
if ($el->nodeType == XML_ELEMENT_NODE) {
if ($el->hasChildNodes())
foreach ($el->childNodes as $child)
if ($child->nodeType == XML_TEXT_NODE) {
$child->replaceData(0, strlen($child->wholeText), $textContent);
$done = true;
}
if (!$done)
$el->appendChild($el->ownerDocument->createTextNode($textContent));
$done = false;
}
}
};
if (is_callable($this->check))
$this->each($this->text_helper, $this->check, $textContent);
else
$this->each($this->text_helper, FALSE, $textContent);
return $this;
} else {
$toReturn = "";
foreach ($this->results as $result) {
//element nodes only
if ($result->nodeType == XML_ELEMENT_NODE)
$toReturn .= $result->textContent;
}
return $toReturn;
}
}
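/*
 * Illustrative usage sketch, not part of the original source:
 *   $xml->q("//h1")->text("New title");  // sets the text of every matched <h1>
 *   $title = $xml->q("//h1")->text();    // concatenated text content of the results
 * $xml, the selector and the text value are hypothetical.
 */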
/**
* get a string of concatenated source of all DOMElements in results
*
* @return string
*/
final public function resultsAsSource()
{
$toReturn = "";
foreach ($this->results as $result) {
//element nodes only
if ($result->nodeType == XML_ELEMENT_NODE)
$toReturn .= $result->C14N();
}
return $toReturn;
}
/**
* get a string with concatenated source of all nodes in results
*
* @return string
*/
final public function xml()
{
$toReturn = "";
foreach ($this->results as $result)
$toReturn .= $result->C14N();
return $toReturn;
}
/**
* retrieve a documentFragment containing all nodes in results
*
* @return DOMDocumentFragment
*/
final public function resultsAsDocumentFragment()
{
$docFragment = $this->doc->createDocumentFragment();
foreach ($this->results as $result)
$docFragment->appendChild($result);
return $docFragment;
}
/**
* use to call a method of a DOMElement with given args
*
* @param string $method
* @param array $args - method arguments
* @return CommonXml
*/
final public function elements($method, $args = array())
{
if (!empty($method))
foreach ($this->results as $result)
if ($result instanceof DOMElement)
if (method_exists($result, $method))
call_user_func_array(array(
$result,
$method
), $args);
else {
trigger_error(get_class($result) . "::$method doesn't exists; trying if any callable property is available", E_USER_WARNING);
if (is_callable(array(
$result,
$method
))) {
trigger_error(get_class($result) . "::$method callable property found, executing", E_USER_NOTICE);
call_user_func_array(array(
$result,
$method
), $args);
} else
trigger_error(get_class($result) . "::$method callable property not found either", E_USER_WARNING);
}
return $this;
}
/**
* get the document root node
*
* @return DOMNode
*/
final public function getRootElement()
{
return $this->doc->documentElement;
}
/**
* copies the results array into the given variable
*
* @param array $destination - variable that receives the results (passed by reference)
* @return CommonXml
*/
final public function to(&$destination)
{
$destination = $this->results;
return $this;
}
/**
* retrieve the first element of results
*
* @return DOMNode or FALSE
*/
final public function first()
{
if (gettype($this->results) == "array" && sizeof($this->results) > 0) {
if (gettype($this->get(0)) == "object" && $this->get(0) instanceof DOMNode)
return $this->get(0);
} else
return FALSE;
}
/**
* retrieve the last element of results
*
* @return DOMNode or FALSE
*/
final public function last()
{
if (gettype($this->results) == "array" && sizeof($this->results) > 0) {
if (gettype($this->get(sizeof($this->results) - 1)) == "object" && $this->get(sizeof($this->results) - 1) instanceof DOMNode)
return $this->get(sizeof($this->results) - 1);
} else
return FALSE;
}
public function parentVersion()
{
return parent::version;
}
public function __version__()
{
return array(
__CLASS__ => self::version,
get_parent_class($this) => parent::version
);
}
}
| gpl-2.0 |
friedrich420/N3-AEL-Kernel-NF1-v5- | net/ipv6/route.c | 75806 | /*
* Linux INET6 implementation
* FIB front-end.
*
* Authors:
* Pedro Roque <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Changes:
*
* YOSHIFUJI Hideaki @USAGI
* reworked default router selection.
* - respect outgoing interface
* - select from (probably) reachable routers (i.e.
* routers in REACHABLE, STALE, DELAY or PROBE states).
* - always select the same router if it is (probably)
* reachable. otherwise, round-robin the list.
* Ville Nuorvala
* Fixed routing subtrees.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex,
unsigned pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex);
#endif
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
struct rt6_info *rt = (struct rt6_info *) dst;
struct inet_peer *peer;
u32 *p = NULL;
if (!(rt->dst.flags & DST_HOST))
return NULL;
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
peer = rt->rt6i_peer;
if (peer) {
u32 *old_p = __DST_METRICS_PTR(old);
unsigned long prev, new;
p = peer->metrics;
if (inet_metrics_new(peer))
memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
new = (unsigned long) p;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev != old) {
p = __DST_METRICS_PTR(prev);
if (prev & DST_METRICS_READ_ONLY)
p = NULL;
}
}
return p;
}
static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
{
struct in6_addr *p = &rt->rt6i_gateway;
if (!ipv6_addr_any(p))
return (const void *) p;
return daddr;
}
static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
struct rt6_info *rt = (struct rt6_info *) dst;
struct neighbour *n;
daddr = choose_neigh_daddr(rt, daddr);
n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
if (n)
return n;
return neigh_create(&nd_tbl, daddr, dst->dev);
}
static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
{
struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
if (!n) {
n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
if (IS_ERR(n))
return PTR_ERR(n);
}
dst_set_neighbour(&rt->dst, n);
return 0;
}
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = ip6_dst_gc,
.gc_thresh = 1024,
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
.mtu = ip6_mtu,
.cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
.link_failure = ip6_link_failure,
.update_pmtu = ip6_rt_update_pmtu,
.local_out = __ip6_local_out,
.neigh_lookup = ip6_neigh_lookup,
};
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
return mtu ? : dst->dev->mtu;
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
unsigned long old)
{
return NULL;
}
static struct dst_ops ip6_dst_blackhole_ops = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
.mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
.neigh_lookup = ip6_neigh_lookup,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
[RTAX_HOPLIMIT - 1] = 0,
};
static struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
.obsolete = -1,
.error = -ENETUNREACH,
.input = ip6_pkt_discard,
.output = ip6_pkt_discard_out,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
.rt6i_metric = ~(u32) 0,
.rt6i_ref = ATOMIC_INIT(1),
};
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct sk_buff *skb);
static struct rt6_info ip6_prohibit_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
.obsolete = -1,
.error = -EACCES,
.input = ip6_pkt_prohibit,
.output = ip6_pkt_prohibit_out,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
.rt6i_metric = ~(u32) 0,
.rt6i_ref = ATOMIC_INIT(1),
};
static struct rt6_info ip6_blk_hole_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
.__use = 1,
.obsolete = -1,
.error = -EINVAL,
.input = dst_discard,
.output = dst_discard,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
.rt6i_metric = ~(u32) 0,
.rt6i_ref = ATOMIC_INIT(1),
};
#endif
/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
struct net_device *dev,
int flags)
{
struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
if (rt)
memset(&rt->rt6i_table, 0,
sizeof(*rt) - sizeof(struct dst_entry));
return rt;
}
static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;
struct inet_peer *peer = rt->rt6i_peer;
if (!(rt->dst.flags & DST_HOST))
dst_destroy_metrics_generic(dst);
if (idev) {
rt->rt6i_idev = NULL;
in6_dev_put(idev);
}
if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
dst_release(dst->from);
if (peer) {
rt->rt6i_peer = NULL;
inet_putpeer(peer);
}
}
static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
static u32 rt6_peer_genid(void)
{
return atomic_read(&__rt6_peer_genid);
}
void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
else
rt->rt6i_peer_genid = rt6_peer_genid();
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
if (dev != loopback_dev && idev && idev->dev == dev) {
struct inet6_dev *loopback_idev =
in6_dev_get(loopback_dev);
if (loopback_idev) {
rt->rt6i_idev = loopback_idev;
in6_dev_put(idev);
}
}
}
static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
struct rt6_info *ort = NULL;
if (rt->rt6i_flags & RTF_EXPIRES) {
if (time_after(jiffies, rt->dst.expires))
return 1;
} else if (rt->dst.from) {
ort = (struct rt6_info *) rt->dst.from;
return (ort->rt6i_flags & RTF_EXPIRES) &&
time_after(jiffies, ort->dst.expires);
}
return 0;
}
static inline int rt6_need_strict(const struct in6_addr *daddr)
{
return ipv6_addr_type(daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
/*
* Route lookup. Any table->tb6_lock is implied.
*/
static inline struct rt6_info *rt6_device_match(struct net *net,
struct rt6_info *rt,
const struct in6_addr *saddr,
int oif,
int flags)
{
struct rt6_info *local = NULL;
struct rt6_info *sprt;
if (!oif && ipv6_addr_any(saddr))
goto out;
for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
struct net_device *dev = sprt->dst.dev;
if (oif) {
if (dev->ifindex == oif)
return sprt;
if (dev->flags & IFF_LOOPBACK) {
if (!sprt->rt6i_idev ||
sprt->rt6i_idev->dev->ifindex != oif) {
if (flags & RT6_LOOKUP_F_IFACE && oif)
continue;
if (local && (!oif ||
local->rt6i_idev->dev->ifindex == oif))
continue;
}
local = sprt;
}
} else {
if (ipv6_chk_addr(net, saddr, dev,
flags & RT6_LOOKUP_F_IFACE))
return sprt;
}
}
if (oif) {
if (local)
return local;
if (flags & RT6_LOOKUP_F_IFACE)
return net->ipv6.ip6_null_entry;
}
out:
return rt;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
static void rt6_probe(struct rt6_info *rt)
{
struct neighbour *neigh;
/*
* Okay, this does not seem to be appropriate
* for now, however, we need to check if it
* is really so; aka Router Reachability Probing.
*
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
rcu_read_lock();
neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
goto out;
read_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
struct in6_addr mcaddr;
struct in6_addr *target;
neigh->updated = jiffies;
read_unlock_bh(&neigh->lock);
target = (struct in6_addr *)&neigh->primary_key;
addrconf_addr_solict_mult(target, &mcaddr);
ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
} else {
read_unlock_bh(&neigh->lock);
}
out:
rcu_read_unlock();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif
/*
* Default Router Selection (RFC 2461 6.3.6)
*/
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
struct net_device *dev = rt->dst.dev;
if (!oif || dev->ifindex == oif)
return 2;
if ((dev->flags & IFF_LOOPBACK) &&
rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
return 1;
return 0;
}
static inline int rt6_check_neigh(struct rt6_info *rt)
{
struct neighbour *neigh;
int m;
rcu_read_lock();
neigh = dst_get_neighbour_noref(&rt->dst);
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
else if (neigh) {
read_lock_bh(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
m = 2;
#ifdef CONFIG_IPV6_ROUTER_PREF
else if (neigh->nud_state & NUD_FAILED)
m = 0;
#endif
else
m = 1;
read_unlock_bh(&neigh->lock);
} else
m = 0;
rcu_read_unlock();
return m;
}
static int rt6_score_route(struct rt6_info *rt, int oif,
int strict)
{
int m, n;
m = rt6_check_dev(rt, oif);
if (!m && (strict & RT6_LOOKUP_F_IFACE))
return -1;
#ifdef CONFIG_IPV6_ROUTER_PREF
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
n = rt6_check_neigh(rt);
if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
return -1;
return m;
}
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
int *mpri, struct rt6_info *match)
{
int m;
if (rt6_check_expired(rt))
goto out;
m = rt6_score_route(rt, oif, strict);
if (m < 0)
goto out;
if (m > *mpri) {
if (strict & RT6_LOOKUP_F_REACHABLE)
rt6_probe(match);
*mpri = m;
match = rt;
} else if (strict & RT6_LOOKUP_F_REACHABLE) {
rt6_probe(rt);
}
out:
return match;
}
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
struct rt6_info *rr_head,
u32 metric, int oif, int strict)
{
struct rt6_info *rt, *match;
int mpri = -1;
match = NULL;
for (rt = rr_head; rt && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
return match;
}
static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
struct rt6_info *match, *rt0;
struct net *net;
rt0 = fn->rr_ptr;
if (!rt0)
fn->rr_ptr = rt0 = fn->leaf;
match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
if (!match &&
(strict & RT6_LOOKUP_F_REACHABLE)) {
struct rt6_info *next = rt0->dst.rt6_next;
/* no entries matched; do round-robin */
if (!next || next->rt6i_metric != rt0->rt6i_metric)
next = fn->leaf;
if (next != rt0)
fn->rr_ptr = next;
}
net = dev_net(rt0->dst.dev);
return match ? match : net->ipv6.ip6_null_entry;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
const struct in6_addr *gwaddr)
{
struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
unsigned int pref;
unsigned long lifetime;
struct rt6_info *rt;
if (len < sizeof(struct route_info)) {
return -EINVAL;
}
/* Sanity check for prefix_len and length */
if (rinfo->length > 3) {
return -EINVAL;
} else if (rinfo->prefix_len > 128) {
return -EINVAL;
} else if (rinfo->prefix_len > 64) {
if (rinfo->length < 2) {
return -EINVAL;
}
} else if (rinfo->prefix_len > 0) {
if (rinfo->length < 1) {
return -EINVAL;
}
}
pref = rinfo->route_pref;
if (pref == ICMPV6_ROUTER_PREF_INVALID)
return -EINVAL;
lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
if (rinfo->length == 3)
prefix = (struct in6_addr *)rinfo->prefix;
else {
/* this function is safe */
ipv6_addr_prefix(&prefix_buf,
(struct in6_addr *)rinfo->prefix,
rinfo->prefix_len);
prefix = &prefix_buf;
}
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
dev->ifindex);
if (rt && !lifetime) {
ip6_del_rt(rt);
rt = NULL;
}
if (!rt && lifetime)
rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
if (!addrconf_finite_timeout(lifetime))
rt6_clean_expires(rt);
else
rt6_set_expires(rt, jiffies + HZ * lifetime);
dst_release(&rt->dst);
}
return 0;
}
#endif
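/* If the lookup ended on the null entry, walk back up the fib tree
 * (consulting source-routing subtrees on the way) until a node carrying
 * route info is found, then restart the search from there.
 */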
#define BACKTRACK(__net, saddr) \
do { \
if (rt == __net->ipv6.ip6_null_entry) { \
struct fib6_node *pn; \
while (1) { \
if (fn->fn_flags & RTN_TL_ROOT) \
goto out; \
pn = fn->parent; \
if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
else \
fn = pn; \
if (fn->fn_flags & RTN_RTINFO) \
goto restart; \
} \
} \
} while (0)
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6, int flags)
{
struct fib6_node *fn;
struct rt6_info *rt;
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
rt = fn->leaf;
rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
BACKTRACK(net, &fl6->saddr);
out:
dst_use(&rt->dst, jiffies);
read_unlock_bh(&table->tb6_lock);
return rt;
}
struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags)
{
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
const struct in6_addr *saddr, int oif, int strict)
{
struct flowi6 fl6 = {
.flowi6_oif = oif,
.daddr = *daddr,
};
struct dst_entry *dst;
int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
if (saddr) {
memcpy(&fl6.saddr, saddr, sizeof(*saddr));
flags |= RT6_LOOKUP_F_HAS_SADDR;
}
dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
if (dst->error == 0)
return (struct rt6_info *) dst;
dst_release(dst);
return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with table->tb6_lock NOT held.
   It takes ownership of the new route entry; if the addition fails
   for any reason, the route is freed. In any case, if the caller
   does not hold a reference, it may be destroyed.
*/
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
{
int err;
struct fib6_table *table;
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
err = fib6_add(&table->tb6_root, rt, info);
write_unlock_bh(&table->tb6_lock);
return err;
}
int ip6_ins_rt(struct rt6_info *rt)
{
struct nl_info info = {
.nl_net = dev_net(rt->dst.dev),
};
return __ip6_ins_rt(rt, &info);
}
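/* Copy-on-write: build a host cache entry for daddr from ort and bind a
 * neighbour to it, forcing a GC pass and retrying when the neighbour
 * table overflows.
 */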
static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
struct rt6_info *rt;
/*
* Clone the route.
*/
rt = ip6_rt_copy(ort, daddr);
if (rt) {
int attempts = !in_softirq();
if (!(rt->rt6i_flags & RTF_GATEWAY)) {
if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
rt->rt6i_gateway = *daddr;
}
rt->rt6i_flags |= RTF_CACHE;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
rt->rt6i_src.addr = *saddr;
rt->rt6i_src.plen = 128;
}
#endif
retry:
if (rt6_bind_neighbour(rt, rt->dst.dev)) {
struct net *net = dev_net(rt->dst.dev);
int saved_rt_min_interval =
net->ipv6.sysctl.ip6_rt_gc_min_interval;
int saved_rt_elasticity =
net->ipv6.sysctl.ip6_rt_gc_elasticity;
if (attempts-- > 0) {
net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
ip6_dst_gc(&net->ipv6.ip6_dst_ops);
net->ipv6.sysctl.ip6_rt_gc_elasticity =
saved_rt_elasticity;
net->ipv6.sysctl.ip6_rt_gc_min_interval =
saved_rt_min_interval;
goto retry;
}
if (net_ratelimit())
printk(KERN_WARNING
"ipv6: Neighbour table overflow.\n");
dst_free(&rt->dst);
return NULL;
}
}
return rt;
}
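/* Cheaper clone for routes that already carry a usable neighbour: the
 * parent's neighbour entry is reused instead of resolving a new one.
 */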
static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
const struct in6_addr *daddr)
{
struct rt6_info *rt = ip6_rt_copy(ort, daddr);
if (rt) {
rt->rt6i_flags |= RTF_CACHE;
dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
}
return rt;
}
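/* Core policy-routing lookup: select the best route for the flow and,
 * unless it is already a cache entry, insert a per-destination clone
 * (COW for routes without a bound neighbour), retrying on races with
 * other inserters.
 */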
static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
struct flowi6 *fl6, int flags, bool input)
{
struct fib6_node *fn;
struct rt6_info *rt, *nrt;
int strict = 0;
int attempts = 3;
int err;
int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
int local = RTF_NONEXTHOP;
strict |= flags & RT6_LOOKUP_F_IFACE;
if (input)
local |= RTF_LOCAL;
relookup:
read_lock_bh(&table->tb6_lock);
restart_2:
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
rt = rt6_select(fn, oif, strict | reachable);
BACKTRACK(net, &fl6->saddr);
if (rt == net->ipv6.ip6_null_entry ||
rt->rt6i_flags & RTF_CACHE)
goto out;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
if (!dst_get_neighbour_noref_raw(&rt->dst) &&
!(rt->rt6i_flags & local))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
else
goto out2;
dst_release(&rt->dst);
rt = nrt ? : net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
if (nrt) {
err = ip6_ins_rt(nrt);
if (!err)
goto out2;
}
if (--attempts <= 0)
goto out2;
/*
 * Race condition! In the gap while table->tb6_lock was
 * released, someone could have inserted this route. Relookup.
 */
dst_release(&rt->dst);
goto relookup;
out:
if (reachable) {
reachable = 0;
goto restart_2;
}
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
out2:
rt->dst.lastuse = jiffies;
rt->dst.__use++;
return rt;
}
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
struct flowi6 *fl6, int flags)
{
return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags, true);
}
static struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
struct flowi6 *fl6, int flags)
{
if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
void ip6_route_input(struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct net *net = dev_net(skb->dev);
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct flowi6 fl6 = {
.flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
};
skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
struct flowi6 *fl6, int flags)
{
return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags, false);
}
struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
struct flowi6 *fl6)
{
int flags = 0;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl6->saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL(ip6_route_output);
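/* Replace dst_orig with a blackhole dst whose handlers discard packets;
 * metrics, idev, gateway and flags are copied so callers can keep
 * treating the result like an ordinary route.
 */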
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
struct dst_entry *new = NULL;
rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
if (rt) {
memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
if (dst_metrics_read_only(&ort->dst))
new->_metrics = ort->dst._metrics;
else
dst_copy_metrics(new, &ort->dst);
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags;
rt6_clean_expires(rt);
rt->rt6i_metric = 0;
memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
dst_free(new);
}
dst_release(dst_orig);
return new ? new : ERR_PTR(-ENOMEM);
}
/*
* Destination cache support functions
*/
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rt6_info *rt;
rt = (struct rt6_info *) dst;
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
if (rt->rt6i_peer_genid != rt6_peer_genid()) {
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 0);
rt->rt6i_peer_genid = rt6_peer_genid();
}
return dst;
}
return NULL;
}
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *) dst;
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
if (rt6_check_expired(rt)) {
ip6_del_rt(rt);
dst = NULL;
}
} else {
dst_release(dst);
dst = NULL;
}
}
return dst;
}
static void ip6_link_failure(struct sk_buff *skb)
{
struct rt6_info *rt;
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
if (ip6_del_rt(rt))
dst_free(&rt->dst);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
}
}
}
static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
struct rt6_info *rt6 = (struct rt6_info*)dst;
if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
if (mtu < IPV6_MIN_MTU) {
u32 features = dst_metric(dst, RTAX_FEATURES);
mtu = IPV6_MIN_MTU;
features |= RTAX_FEATURE_ALLFRAG;
dst_metric_set(dst, RTAX_FEATURES, features);
}
dst_metric_set(dst, RTAX_MTU, mtu);
}
}
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
unsigned int mtu = dst_mtu(dst);
struct net *net = dev_net(dev);
mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
/*
* Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
* corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
* IPV6_MAXPLEN is also valid and means: "any MSS,
* rely only on pmtu discovery"
*/
if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
mtu = IPV6_MAXPLEN;
return mtu;
}
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
struct inet6_dev *idev;
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
return mtu;
mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
mtu = idev->cnf.mtu6;
rcu_read_unlock();
return mtu;
}
static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);
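/* Allocate a standalone dst for ICMPv6/ndisc output.  Entries are chained
 * on icmp6_dst_gc_list and freed by icmp6_dst_gc() once unreferenced.
 */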
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
struct flowi6 *fl6)
{
struct dst_entry *dst;
struct rt6_info *rt;
struct inet6_dev *idev = in6_dev_get(dev);
struct net *net = dev_net(dev);
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
goto out;
}
if (neigh)
neigh_hold(neigh);
else {
neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
if (IS_ERR(neigh)) {
in6_dev_put(idev);
dst_free(&rt->dst);
return ERR_CAST(neigh);
}
}
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
dst_set_neighbour(&rt->dst, neigh);
atomic_set(&rt->dst.__refcnt, 1);
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
icmp6_dst_gc_list = &rt->dst;
spin_unlock_bh(&icmp6_dst_lock);
fib6_force_start_gc(net);
dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
out:
return dst;
}
int icmp6_dst_gc(void)
{
struct dst_entry *dst, **pprev;
int more = 0;
spin_lock_bh(&icmp6_dst_lock);
pprev = &icmp6_dst_gc_list;
while ((dst = *pprev) != NULL) {
if (!atomic_read(&dst->__refcnt)) {
*pprev = dst->next;
dst_free(dst);
} else {
pprev = &dst->next;
++more;
}
}
spin_unlock_bh(&icmp6_dst_lock);
return more;
}
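/* Walk the icmp6 dst list and free every entry the callback selects;
 * used by rt6_ifdown() to drop entries for a disappearing device.
 */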
static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
void *arg)
{
struct dst_entry *dst, **pprev;
spin_lock_bh(&icmp6_dst_lock);
pprev = &icmp6_dst_gc_list;
while ((dst = *pprev) != NULL) {
struct rt6_info *rt = (struct rt6_info *) dst;
if (func(rt, arg)) {
*pprev = dst->next;
dst_free(dst);
} else {
pprev = &dst->next;
}
}
spin_unlock_bh(&icmp6_dst_lock);
}
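/* dst_ops garbage collector: skip the fib6 GC pass while the minimum
 * interval has not elapsed and the entry count stays below max_size,
 * otherwise run it with a gradually decaying expiry time.
 */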
static int ip6_dst_gc(struct dst_ops *ops)
{
unsigned long now = jiffies;
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
int entries;
entries = dst_entries_get_fast(ops);
if (time_after(rt_last_gc + rt_min_interval, now) &&
entries <= rt_max_size)
goto out;
net->ipv6.ip6_rt_gc_expire++;
fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
net->ipv6.ip6_rt_last_gc = now;
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
return entries > rt_max_size;
}
/* Clean the host part of a prefix. Not necessary for the radix tree,
   but it results in cleaner routing tables.
   Remove this only once everything is known to work!
*/
int ip6_dst_hoplimit(struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
if (hoplimit == 0) {
struct net_device *dev = dst->dev;
struct inet6_dev *idev;
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
hoplimit = idev->cnf.hop_limit;
else
hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
rcu_read_unlock();
}
return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
/*
 *	Add a route described by cfg to the routing table.
 */
int ip6_route_add(struct fib6_config *cfg)
{
int err;
struct net *net = cfg->fc_nlinfo.nl_net;
struct rt6_info *rt = NULL;
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
struct fib6_table *table;
int addr_type;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
return -EINVAL;
#ifndef CONFIG_IPV6_SUBTREES
if (cfg->fc_src_len)
return -EINVAL;
#endif
if (cfg->fc_ifindex) {
err = -ENODEV;
dev = dev_get_by_index(net, cfg->fc_ifindex);
if (!dev)
goto out;
idev = in6_dev_get(dev);
if (!idev)
goto out;
}
if (cfg->fc_metric == 0)
cfg->fc_metric = IP6_RT_PRIO_USER;
err = -ENOBUFS;
if (cfg->fc_nlinfo.nlh &&
!(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
table = fib6_get_table(net, cfg->fc_table);
if (!table) {
printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
table = fib6_new_table(net, cfg->fc_table);
}
} else {
table = fib6_new_table(net, cfg->fc_table);
}
if (!table)
goto out;
rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
goto out;
}
rt->dst.obsolete = -1;
if (cfg->fc_flags & RTF_EXPIRES)
rt6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
else
rt6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
rt->rt6i_protocol = cfg->fc_protocol;
addr_type = ipv6_addr_type(&cfg->fc_dst);
if (addr_type & IPV6_ADDR_MULTICAST)
rt->dst.input = ip6_mc_input;
else if (cfg->fc_flags & RTF_LOCAL)
rt->dst.input = ip6_input;
else
rt->dst.input = ip6_forward;
rt->dst.output = ip6_output;
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
rt->dst.flags |= DST_HOST;
if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
if (!metrics) {
err = -ENOMEM;
goto out;
}
dst_init_metrics(&rt->dst, metrics, 0);
}
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
rt->rt6i_src.plen = cfg->fc_src_len;
#endif
rt->rt6i_metric = cfg->fc_metric;
/* We cannot add true routes via loopback here;
   they would result in kernel looping. Promote them to reject routes.
*/
if ((cfg->fc_flags & RTF_REJECT) ||
(dev && (dev->flags & IFF_LOOPBACK) &&
!(addr_type & IPV6_ADDR_LOOPBACK) &&
!(cfg->fc_flags & RTF_LOCAL))) {
/* hold loopback dev/idev if we haven't done so. */
if (dev != net->loopback_dev) {
if (dev) {
dev_put(dev);
in6_dev_put(idev);
}
dev = net->loopback_dev;
dev_hold(dev);
idev = in6_dev_get(dev);
if (!idev) {
err = -ENODEV;
goto out;
}
}
rt->dst.output = ip6_pkt_discard_out;
rt->dst.input = ip6_pkt_discard;
rt->dst.error = -ENETUNREACH;
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
goto install_route;
}
if (cfg->fc_flags & RTF_GATEWAY) {
const struct in6_addr *gw_addr;
int gwa_type;
gw_addr = &cfg->fc_gateway;
rt->rt6i_gateway = *gw_addr;
gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
struct rt6_info *grt;
/* IPv6 strictly inhibits using non-link-local
   addresses as the nexthop address.
   Otherwise, the router will not be able to send redirects.
   This is very good, but in some (rare!) circumstances
   (SIT, PtP, NBMA NOARP links) it is handy to allow
   some exceptions. --ANK
*/
err = -EINVAL;
if (!(gwa_type & IPV6_ADDR_UNICAST))
goto out;
grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
err = -EHOSTUNREACH;
if (!grt)
goto out;
if (dev) {
if (dev != grt->dst.dev) {
dst_release(&grt->dst);
goto out;
}
} else {
dev = grt->dst.dev;
idev = grt->rt6i_idev;
dev_hold(dev);
in6_dev_hold(grt->rt6i_idev);
}
if (!(grt->rt6i_flags & RTF_GATEWAY))
err = 0;
dst_release(&grt->dst);
if (err)
goto out;
}
err = -EINVAL;
if (!dev || (dev->flags & IFF_LOOPBACK))
goto out;
}
err = -ENODEV;
if (!dev)
goto out;
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
err = -EINVAL;
goto out;
}
rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
rt->rt6i_prefsrc.plen = 128;
} else
rt->rt6i_prefsrc.plen = 0;
if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
err = rt6_bind_neighbour(rt, dev);
if (err)
goto out;
}
rt->rt6i_flags = cfg->fc_flags;
install_route:
if (cfg->fc_mx) {
struct nlattr *nla;
int remaining;
nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
int type = nla_type(nla);
if (type) {
if (type > RTAX_MAX) {
err = -EINVAL;
goto out;
}
dst_metric_set(&rt->dst, type, nla_get_u32(nla));
}
}
}
rt->dst.dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
cfg->fc_nlinfo.nl_net = dev_net(dev);
return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
out:
if (dev)
dev_put(dev);
if (idev)
in6_dev_put(idev);
if (rt)
dst_free(&rt->dst);
return err;
}
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
int err;
struct fib6_table *table;
struct net *net = dev_net(rt->dst.dev);
if (rt == net->ipv6.ip6_null_entry) {
err = -ENOENT;
goto out;
}
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
err = fib6_del(rt, info);
write_unlock_bh(&table->tb6_lock);
out:
dst_release(&rt->dst);
return err;
}
int ip6_del_rt(struct rt6_info *rt)
{
struct nl_info info = {
.nl_net = dev_net(rt->dst.dev),
};
return __ip6_del_rt(rt, &info);
}
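/* Delete the route matching cfg from its table, honouring the optional
 * device, gateway and metric filters.
 */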
static int ip6_route_del(struct fib6_config *cfg)
{
struct fib6_table *table;
struct fib6_node *fn;
struct rt6_info *rt;
int err = -ESRCH;
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
if (!table)
return err;
read_lock_bh(&table->tb6_lock);
fn = fib6_locate(&table->tb6_root,
&cfg->fc_dst, cfg->fc_dst_len,
&cfg->fc_src, cfg->fc_src_len);
if (fn) {
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (cfg->fc_ifindex &&
(!rt->dst.dev ||
rt->dst.dev->ifindex != cfg->fc_ifindex))
continue;
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
return __ip6_del_rt(rt, &cfg->fc_nlinfo);
}
}
read_unlock_bh(&table->tb6_lock);
return err;
}
/*
* Handle redirects
*/
struct ip6rd_flowi {
struct flowi6 fl6;
struct in6_addr gateway;
};
static struct rt6_info *__ip6_route_redirect(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
int flags)
{
struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
struct rt6_info *rt;
struct fib6_node *fn;
/*
* Get the "current" route for this destination and
 * check if the redirect has come from the appropriate router.
*
* RFC 2461 specifies that redirects should only be
* accepted if they come from the nexthop to the target.
* Due to the way the routes are chosen, this notion
* is a bit fuzzy and one might need to check all possible
* routes.
*/
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
/*
 * Current route is on-link; redirect is always invalid.
 *
 * It seems the previous statement is not true. The node could
 * regard us as on-link (e.g. proxy ndisc), but the router
 * serving it might then decide that we should know the truth.
 * 8)8) --ANK (980726).
 */
if (rt6_check_expired(rt))
continue;
if (!(rt->rt6i_flags & RTF_GATEWAY))
continue;
if (fl6->flowi6_oif != rt->dst.dev->ifindex)
continue;
if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
continue;
break;
}
if (!rt)
rt = net->ipv6.ip6_null_entry;
BACKTRACK(net, &fl6->saddr);
out:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
return rt;
};
static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
const struct in6_addr *src,
const struct in6_addr *gateway,
struct net_device *dev)
{
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct net *net = dev_net(dev);
struct ip6rd_flowi rdfl = {
.fl6 = {
.flowi6_oif = dev->ifindex,
.daddr = *dest,
.saddr = *src,
},
};
rdfl.gateway = *gateway;
if (rt6_need_strict(dest))
flags |= RT6_LOOKUP_F_IFACE;
return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
flags, __ip6_route_redirect);
}
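/* Process a validated ICMPv6 redirect: update the neighbour cache entry
 * and install an RTF_DYNAMIC cache route through the new gateway.
 */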
void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
const struct in6_addr *saddr,
struct neighbour *neigh, u8 *lladdr, int on_link)
{
struct rt6_info *rt, *nrt = NULL;
struct netevent_redirect netevent;
struct net *net = dev_net(neigh->dev);
rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
if (rt == net->ipv6.ip6_null_entry) {
if (net_ratelimit())
printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
"for redirect target\n");
goto out;
}
/*
* We have finally decided to accept it.
*/
neigh_update(neigh, lladdr, NUD_STALE,
NEIGH_UPDATE_F_WEAK_OVERRIDE|
NEIGH_UPDATE_F_OVERRIDE|
(on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
NEIGH_UPDATE_F_ISROUTER))
);
/*
 * Redirect received -> path was valid.
 * Redirects are sent only in response to data packets,
 * so this nexthop is apparently reachable. --ANK
 */
dst_confirm(&rt->dst);
/* Duplicate redirect: silently ignore. */
if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
goto out;
nrt = ip6_rt_copy(rt, dest);
if (!nrt)
goto out;
nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
if (ip6_ins_rt(nrt))
goto out;
netevent.old = &rt->dst;
netevent.new = &nrt->dst;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
if (rt->rt6i_flags & RTF_CACHE) {
ip6_del_rt(rt);
return;
}
out:
dst_release(&rt->dst);
}
/*
* Handle ICMP "packet too big" messages
* i.e. Path MTU discovery
*/
static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
struct net *net, u32 pmtu, int ifindex)
{
struct rt6_info *rt, *nrt;
int allfrag = 0;
again:
rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
if (!rt)
return;
if (rt6_check_expired(rt)) {
ip6_del_rt(rt);
goto again;
}
if (pmtu >= dst_mtu(&rt->dst))
goto out;
if (pmtu < IPV6_MIN_MTU) {
/*
* According to RFC2460, PMTU is set to the IPv6 Minimum Link
* MTU (1280) and a fragment header should always be included
* after a node receiving Too Big message reporting PMTU is
* less than the IPv6 Minimum Link MTU.
*/
pmtu = IPV6_MIN_MTU;
allfrag = 1;
}
/* New MTU received -> path was valid.
   PMTU messages are sent only in response to data packets,
   so this nexthop is apparently reachable. --ANK
*/
dst_confirm(&rt->dst);
/* Host route. If it is static, it would be better
   not to override it but to add a new one, so that
   when the cache entry expires the old PMTU
   is restored automatically.
*/
if (rt->rt6i_flags & RTF_CACHE) {
dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
if (allfrag) {
u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
features |= RTAX_FEATURE_ALLFRAG;
dst_metric_set(&rt->dst, RTAX_FEATURES, features);
}
rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
rt->rt6i_flags |= RTF_MODIFIED;
goto out;
}
/* Network route.
   Two cases are possible:
   1. It is a connected route. Action: COW it.
   2. It is a gatewayed or NONEXTHOP route. Action: clone it.
*/
if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, daddr, saddr);
else
nrt = rt6_alloc_clone(rt, daddr);
if (nrt) {
dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
if (allfrag) {
u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
features |= RTAX_FEATURE_ALLFRAG;
dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
}
/* According to RFC 1981, a PMTU increase should not be detected
 * within 5 minutes; the recommended timer is 10 minutes.
 * Here the route expiration time is set to ip6_rt_mtu_expires,
 * which is 10 minutes. After that the decreased PMTU expires
 * and PMTU increase detection happens automatically.
 */
rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC;
ip6_ins_rt(nrt);
}
out:
dst_release(&rt->dst);
}
void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
struct net_device *dev, u32 pmtu)
{
struct net *net = dev_net(dev);
/*
* RFC 1981 states that a node "MUST reduce the size of the packets it
* is sending along the path" that caused the Packet Too Big message.
* Since it's not possible in the general case to determine which
* interface was used to send the original packet, we update the MTU
* on the interface that will be used to send future packets. We also
* update the MTU on the interface that received the Packet Too Big in
* case the original packet was forced out that interface with
* SO_BINDTODEVICE or similar. This is the next best thing to the
* correct behaviour, which would be to update the MTU on all
* interfaces.
*/
rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
}
/*
* Misc support functions
*/
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest)
{
struct net *net = dev_net(ort->dst.dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
ort->dst.dev, 0);
if (rt) {
rt->dst.input = ort->dst.input;
rt->dst.output = ort->dst.output;
rt->dst.flags |= DST_HOST;
rt->rt6i_dst.addr = *dest;
rt->rt6i_dst.plen = 128;
dst_copy_metrics(&rt->dst, &ort->dst);
rt->dst.error = ort->dst.error;
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->dst.lastuse = jiffies;
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags;
if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
(RTF_DEFAULT | RTF_ADDRCONF))
rt6_set_from(rt, ort);
else
rt6_clean_expires(rt);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
rt->rt6i_table = ort->rt6i_table;
}
return rt;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
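/* Route Information option (RFC 4191) support: routes learnt from router
 * advertisements live in RT6_TABLE_INFO and are found/installed here.
 */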
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex)
{
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
table = fib6_get_table(net, RT6_TABLE_INFO);
if (!table)
return NULL;
write_lock_bh(&table->tb6_lock);
fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
if (!fn)
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (rt->dst.dev->ifindex != ifindex)
continue;
if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
continue;
if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
continue;
dst_hold(&rt->dst);
break;
}
out:
write_unlock_bh(&table->tb6_lock);
return rt;
}
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex,
unsigned pref)
{
struct fib6_config cfg = {
.fc_table = RT6_TABLE_INFO,
.fc_metric = IP6_RT_PRIO_USER,
.fc_ifindex = ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
.fc_nlinfo.pid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
};
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
/* We should treat it as a default route if prefix length is 0. */
if (!prefixlen)
cfg.fc_flags |= RTF_DEFAULT;
ip6_route_add(&cfg);
return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
}
#endif
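/* Look up the RA-learnt default route that points at addr via dev. */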
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
struct rt6_info *rt;
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
if (!table)
return NULL;
write_lock_bh(&table->tb6_lock);
for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
if (dev == rt->dst.dev &&
((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&rt->rt6i_gateway, addr))
break;
}
if (rt)
dst_hold(&rt->dst);
write_unlock_bh(&table->tb6_lock);
return rt;
}
struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref)
{
struct fib6_config cfg = {
.fc_table = RT6_TABLE_DFLT,
.fc_metric = IP6_RT_PRIO_USER,
.fc_ifindex = dev->ifindex,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
.fc_nlinfo.pid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = dev_net(dev),
};
cfg.fc_gateway = *gwaddr;
ip6_route_add(&cfg);
return rt6_get_dflt_router(gwaddr, dev);
}
void rt6_purge_dflt_routers(struct net *net)
{
struct rt6_info *rt;
struct fib6_table *table;
/* NOTE: Keep consistent with rt6_get_dflt_router */
table = fib6_get_table(net, RT6_TABLE_DFLT);
if (!table)
return;
restart:
read_lock_bh(&table->tb6_lock);
for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
ip6_del_rt(rt);
goto restart;
}
}
read_unlock_bh(&table->tb6_lock);
}
static void rtmsg_to_fib6_config(struct net *net,
struct in6_rtmsg *rtmsg,
struct fib6_config *cfg)
{
memset(cfg, 0, sizeof(*cfg));
cfg->fc_table = RT6_TABLE_MAIN;
cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
cfg->fc_metric = rtmsg->rtmsg_metric;
cfg->fc_expires = rtmsg->rtmsg_info;
cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
cfg->fc_src_len = rtmsg->rtmsg_src_len;
cfg->fc_flags = rtmsg->rtmsg_flags;
cfg->fc_nlinfo.nl_net = net;
cfg->fc_dst = rtmsg->rtmsg_dst;
cfg->fc_src = rtmsg->rtmsg_src;
cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct fib6_config cfg;
struct in6_rtmsg rtmsg;
int err;
switch(cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = copy_from_user(&rtmsg, arg,
sizeof(struct in6_rtmsg));
if (err)
return -EFAULT;
rtmsg_to_fib6_config(net, &rtmsg, &cfg);
rtnl_lock();
switch (cmd) {
case SIOCADDRT:
err = ip6_route_add(&cfg);
break;
case SIOCDELRT:
err = ip6_route_del(&cfg);
break;
default:
err = -EINVAL;
}
rtnl_unlock();
return err;
}
return -EINVAL;
}
/*
* Drop the packet on the floor
*/
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
int type;
struct dst_entry *dst = skb_dst(skb);
switch (ipstats_mib_noroutes) {
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
if (type == IPV6_ADDR_ANY) {
IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INADDRERRORS);
break;
}
/* FALLTHROUGH */
case IPSTATS_MIB_OUTNOROUTES:
IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
ipstats_mib_noroutes);
break;
}
icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
kfree_skb(skb);
return 0;
}
static int ip6_pkt_discard(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}
static int ip6_pkt_discard_out(struct sk_buff *skb)
{
skb->dev = skb_dst(skb)->dev;
return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}
static int ip6_pkt_prohibit_out(struct sk_buff *skb)
{
skb->dev = skb_dst(skb)->dev;
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
#endif
/*
* Allocate a dst for local (unicast / anycast) address.
*/
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
bool anycast)
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
net->loopback_dev, 0);
int err;
if (!rt) {
if (net_ratelimit())
pr_warning("IPv6: Maximum number of routes reached,"
" consider increasing route/max_size.\n");
return ERR_PTR(-ENOMEM);
}
in6_dev_hold(idev);
rt->dst.flags |= DST_HOST;
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_idev = idev;
rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
if (anycast)
rt->rt6i_flags |= RTF_ANYCAST;
else
rt->rt6i_flags |= RTF_LOCAL;
err = rt6_bind_neighbour(rt, rt->dst.dev);
if (err) {
dst_free(&rt->dst);
return ERR_PTR(err);
}
rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
atomic_set(&rt->dst.__refcnt, 1);
return rt;
}
int ip6_route_get_saddr(struct net *net,
struct rt6_info *rt,
const struct in6_addr *daddr,
unsigned int prefs,
struct in6_addr *saddr)
{
struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
int err = 0;
if (rt->rt6i_prefsrc.plen)
*saddr = rt->rt6i_prefsrc.addr;
else
err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
daddr, prefs, saddr);
return err;
}
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
struct net_device *dev;
struct net *net;
struct in6_addr *addr;
};
static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
struct net *net = ((struct arg_dev_net_ip *)arg)->net;
struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
if (((void *)rt->dst.dev == dev || !dev) &&
rt != net->ipv6.ip6_null_entry &&
ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
/* remove prefsrc entry */
rt->rt6i_prefsrc.plen = 0;
}
return 0;
}
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
struct arg_dev_net_ip adni = {
.dev = ifp->idev->dev,
.net = net,
.addr = &ifp->addr,
};
fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
}
struct arg_dev_net {
struct net_device *dev;
struct net *net;
};
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
const struct arg_dev_net *adn = arg;
const struct net_device *dev = adn->dev;
if ((rt->dst.dev == dev || !dev) &&
rt != adn->net->ipv6.ip6_null_entry)
return -1;
return 0;
}
void rt6_ifdown(struct net *net, struct net_device *dev)
{
struct arg_dev_net adn = {
.dev = dev,
.net = net,
};
fib6_clean_all(net, fib6_ifdown, 0, &adn);
icmp6_clean_all(fib6_ifdown, &adn);
}
struct rt6_mtu_change_arg
{
struct net_device *dev;
unsigned mtu;
};
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
struct inet6_dev *idev;
/* In IPv6, PMTU discovery is not optional,
   so the RTAX_MTU lock cannot disable it.
   We still use this lock to block changes
   caused by addrconf/ndisc.
*/
idev = __in6_dev_get(arg->dev);
if (!idev)
return 0;
/* For an administrative MTU increase there is no way to discover
   the corresponding IPv6 PMTU increase, so the PMTU must be updated here.
   Since RFC 1981 doesn't cover administrative MTU increases,
   updating the PMTU here is a MUST (e.g. for jumbo frames).
*/
/*
   If the new MTU is less than the route PMTU, the new MTU will be the
   lowest MTU in the path; update the route PMTU to reflect the
   decrease. If the new MTU is greater than the route PMTU, and the
   old MTU was the lowest MTU in the path, update the route PMTU
   to reflect the increase. In that case, if another node on the path
   still has a lower MTU, its Packet Too Big message will trigger
   PMTU discovery again.
*/
if (rt->dst.dev == arg->dev &&
!dst_metric_locked(&rt->dst, RTAX_MTU) &&
(dst_mtu(&rt->dst) >= arg->mtu ||
(dst_mtu(&rt->dst) < arg->mtu &&
dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
}
return 0;
}
void rt6_mtu_change(struct net_device *dev, unsigned mtu)
{
struct rt6_mtu_change_arg arg = {
.dev = dev,
.mtu = mtu,
};
fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
}
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib6_config *cfg)
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
int err;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
if (err < 0)
goto errout;
err = -EINVAL;
rtm = nlmsg_data(nlh);
memset(cfg, 0, sizeof(*cfg));
cfg->fc_table = rtm->rtm_table;
cfg->fc_dst_len = rtm->rtm_dst_len;
cfg->fc_src_len = rtm->rtm_src_len;
cfg->fc_flags = RTF_UP;
cfg->fc_protocol = rtm->rtm_protocol;
if (rtm->rtm_type == RTN_UNREACHABLE)
cfg->fc_flags |= RTF_REJECT;
if (rtm->rtm_type == RTN_LOCAL)
cfg->fc_flags |= RTF_LOCAL;
cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
if (tb[RTA_GATEWAY]) {
nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
cfg->fc_flags |= RTF_GATEWAY;
}
if (tb[RTA_DST]) {
int plen = (rtm->rtm_dst_len + 7) >> 3;
if (nla_len(tb[RTA_DST]) < plen)
goto errout;
nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
}
if (tb[RTA_SRC]) {
int plen = (rtm->rtm_src_len + 7) >> 3;
if (nla_len(tb[RTA_SRC]) < plen)
goto errout;
nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
}
if (tb[RTA_PREFSRC])
nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
if (tb[RTA_OIF])
cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
if (tb[RTA_PRIORITY])
cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
if (tb[RTA_METRICS]) {
cfg->fc_mx = nla_data(tb[RTA_METRICS]);
cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
}
if (tb[RTA_TABLE])
cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
err = 0;
errout:
return err;
}
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
struct fib6_config cfg;
int err;
err = rtm_to_fib6_config(skb, nlh, &cfg);
if (err < 0)
return err;
return ip6_route_del(&cfg);
}
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
struct fib6_config cfg;
int err;
err = rtm_to_fib6_config(skb, nlh, &cfg);
if (err < 0)
return err;
return ip6_route_add(&cfg);
}
static inline size_t rt6_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(16) /* RTA_SRC */
+ nla_total_size(16) /* RTA_DST */
+ nla_total_size(16) /* RTA_GATEWAY */
+ nla_total_size(16) /* RTA_PREFSRC */
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(4) /* RTA_IIF */
+ nla_total_size(4) /* RTA_OIF */
+ nla_total_size(4) /* RTA_PRIORITY */
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
+ nla_total_size(sizeof(struct rta_cacheinfo));
}
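/* Encode rt as an rtmsg netlink message; when the caller asked for prefix
 * routes only, non-prefix routes are skipped (returns 1).
 */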
static int rt6_fill_node(struct net *net,
struct sk_buff *skb, struct rt6_info *rt,
struct in6_addr *dst, struct in6_addr *src,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
const struct inet_peer *peer;
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
u32 table;
struct neighbour *n;
u32 ts, tsage;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
/* success since this is not a prefix route */
return 1;
}
}
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET6;
rtm->rtm_dst_len = rt->rt6i_dst.plen;
rtm->rtm_src_len = rt->rt6i_src.plen;
rtm->rtm_tos = 0;
if (rt->rt6i_table)
table = rt->rt6i_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
rtm->rtm_table = table;
NLA_PUT_U32(skb, RTA_TABLE, table);
if (rt->rt6i_flags & RTF_REJECT)
rtm->rtm_type = RTN_UNREACHABLE;
else if (rt->rt6i_flags & RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags & RTF_DYNAMIC)
rtm->rtm_protocol = RTPROT_REDIRECT;
else if (rt->rt6i_flags & RTF_ADDRCONF)
rtm->rtm_protocol = RTPROT_KERNEL;
else if (rt->rt6i_flags & RTF_DEFAULT)
rtm->rtm_protocol = RTPROT_RA;
if (rt->rt6i_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
if (dst) {
NLA_PUT(skb, RTA_DST, 16, dst);
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
NLA_PUT(skb, RTA_SRC, 16, src);
rtm->rtm_src_len = 128;
} else if (rtm->rtm_src_len)
NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
#endif
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
int err = ip6mr_get_route(net, skb, rtm, nowait);
if (err <= 0) {
if (!nowait) {
if (err == 0)
return 0;
goto nla_put_failure;
} else {
if (err == -EMSGSIZE)
goto nla_put_failure;
}
}
} else
#endif
NLA_PUT_U32(skb, RTA_IIF, iif);
} else if (dst) {
struct in6_addr saddr_buf;
if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
if (rt->rt6i_prefsrc.plen) {
struct in6_addr saddr_buf;
saddr_buf = rt->rt6i_prefsrc.addr;
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
goto nla_put_failure;
rcu_read_lock();
n = dst_get_neighbour_noref(&rt->dst);
if (n) {
if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
rcu_read_unlock();
goto nla_put_failure;
}
}
rcu_read_unlock();
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
if (!(rt->rt6i_flags & RTF_EXPIRES))
expires = 0;
else if (rt->dst.expires - jiffies < INT_MAX)
expires = rt->dst.expires - jiffies;
else
expires = INT_MAX;
peer = rt->rt6i_peer;
ts = tsage = 0;
if (peer && peer->tcp_ts_stamp) {
ts = peer->tcp_ts;
tsage = get_seconds() - peer->tcp_ts_stamp;
}
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
expires, rt->dst.error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
int prefix;
if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
} else
prefix = 0;
return rt6_fill_node(arg->net,
arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
prefix, 0, NLM_F_MULTI);
}
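/* RTM_GETROUTE handler: resolve the requested source/destination through
 * the routing engine and unicast the resulting route to the caller.
 */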
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX+1];
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6;
int err, iif = 0, oif = 0;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
if (err < 0)
goto errout;
err = -EINVAL;
memset(&fl6, 0, sizeof(fl6));
if (tb[RTA_SRC]) {
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
goto errout;
fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
}
if (tb[RTA_DST]) {
if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
goto errout;
fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
}
if (tb[RTA_IIF])
iif = nla_get_u32(tb[RTA_IIF]);
if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);
if (iif) {
struct net_device *dev;
int flags = 0;
dev = __dev_get_by_index(net, iif);
if (!dev) {
err = -ENODEV;
goto errout;
}
fl6.flowi6_iif = iif;
if (!ipv6_addr_any(&fl6.saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
flags);
} else {
fl6.flowi6_oif = oif;
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
/* Reserve room for dummy headers; this skb can pass
   through a good chunk of the routing engine.
*/
skb_reset_mac_header(skb);
skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, 0, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
return err;
}
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
{
struct sk_buff *skb;
struct net *net = info->nl_net;
u32 seq;
int err;
err = -ENOBUFS;
seq = info->nlh ? info->nlh->nlmsg_seq : 0;
skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
if (!skb)
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
event, info->pid, seq, 0, 0, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
info->nlh, gfp_any());
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
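/* When the loopback device registers, point the special null (and, with
 * multiple tables, prohibit/blackhole) entries at it.
 */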
static int ip6_route_dev_notify(struct notifier_block *this,
unsigned long event, void *data)
{
struct net_device *dev = (struct net_device *)data;
struct net *net = dev_net(dev);
if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry->dst.dev = dev;
net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
}
return NOTIFY_OK;
}
/*
* /proc
*/
#ifdef CONFIG_PROC_FS
struct rt6_proc_arg
{
char *buffer;
int offset;
int length;
int skip;
int len;
};
static int rt6_info_route(struct rt6_info *rt, void *p_arg)
{
struct seq_file *m = p_arg;
struct neighbour *n;
seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
#ifdef CONFIG_IPV6_SUBTREES
seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
#else
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
rcu_read_lock();
n = dst_get_neighbour_noref(&rt->dst);
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
seq_puts(m, "00000000000000000000000000000000");
}
rcu_read_unlock();
seq_printf(m, " %08x %08x %08x %08x %8s\n",
rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
rt->dst.__use, rt->rt6i_flags,
rt->dst.dev ? rt->dst.dev->name : "");
return 0;
}
static int ipv6_route_show(struct seq_file *m, void *v)
{
struct net *net = (struct net *)m->private;
fib6_clean_all_ro(net, rt6_info_route, 0, m);
return 0;
}
static int ipv6_route_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, ipv6_route_show);
}
static const struct file_operations ipv6_route_proc_fops = {
.owner = THIS_MODULE,
.open = ipv6_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
struct net *net = (struct net *)seq->private;
seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
net->ipv6.rt6_stats->fib_nodes,
net->ipv6.rt6_stats->fib_route_nodes,
net->ipv6.rt6_stats->fib_rt_alloc,
net->ipv6.rt6_stats->fib_rt_entries,
net->ipv6.rt6_stats->fib_rt_cache,
dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
net->ipv6.rt6_stats->fib_discarded_routes);
return 0;
}
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, rt6_stats_seq_show);
}
static const struct file_operations rt6_stats_seq_fops = {
.owner = THIS_MODULE,
.open = rt6_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release_net,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
int delay;
if (!write)
return -EINVAL;
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
proc_dointvec(ctl, write, buffer, lenp, ppos);
fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
return 0;
}
ctl_table ipv6_route_table_template[] = {
{
.procname = "flush",
.data = &init_net.ipv6.sysctl.flush_delay,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv6_sysctl_rtcache_flush
},
{
.procname = "gc_thresh",
.data = &ip6_dst_ops_template.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_size",
.data = &init_net.ipv6.sysctl.ip6_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_min_interval",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_timeout",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_interval",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_elasticity",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "mtu_expires",
.data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "min_adv_mss",
.data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_min_interval_ms",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{ }
};
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
struct ctl_table *table;
table = kmemdup(ipv6_route_table_template,
sizeof(ipv6_route_table_template),
GFP_KERNEL);
if (table) {
table[0].data = &net->ipv6.sysctl.flush_delay;
table[0].extra1 = net;
table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
}
return table;
}
#endif
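/* Per-namespace initialisation: copy the dst_ops template, clone the
 * special null/prohibit/blackhole entries and seed the sysctl defaults.
 */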
static int __net_init ip6_route_net_init(struct net *net)
{
int ret = -ENOMEM;
memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
sizeof(net->ipv6.ip6_dst_ops));
if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
goto out_ip6_dst_ops;
net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
sizeof(*net->ipv6.ip6_null_entry),
GFP_KERNEL);
if (!net->ipv6.ip6_null_entry)
goto out_ip6_dst_entries;
net->ipv6.ip6_null_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
ip6_template_metrics, true);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
sizeof(*net->ipv6.ip6_prohibit_entry),
GFP_KERNEL);
if (!net->ipv6.ip6_prohibit_entry)
goto out_ip6_null_entry;
net->ipv6.ip6_prohibit_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
ip6_template_metrics, true);
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
GFP_KERNEL);
if (!net->ipv6.ip6_blk_hole_entry)
goto out_ip6_prohibit_entry;
net->ipv6.ip6_blk_hole_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
ip6_template_metrics, true);
#endif
net->ipv6.sysctl.flush_delay = 0;
net->ipv6.sysctl.ip6_rt_max_size = 4096;
net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
net->ipv6.ip6_rt_gc_expire = 30*HZ;
ret = 0;
out:
return ret;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
kfree(net->ipv6.ip6_prohibit_entry);
kfree(net->ipv6.ip6_blk_hole_entry);
#endif
dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
#endif
return 0;
}
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
proc_net_remove(net, "ipv6_route");
proc_net_remove(net, "rt6_stats");
#endif
}
static struct pernet_operations ip6_route_net_ops = {
.init = ip6_route_net_init,
.exit = ip6_route_net_exit,
};
static struct pernet_operations ip6_route_net_late_ops = {
.init = ip6_route_net_init_late,
.exit = ip6_route_net_exit_late,
};
static struct notifier_block ip6_route_dev_notifier = {
.notifier_call = ip6_route_dev_notify,
.priority = 0,
};
int __init ip6_route_init(void)
{
int ret;
ret = -ENOMEM;
ip6_dst_ops_template.kmem_cachep =
kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip6_dst_ops_template.kmem_cachep)
goto out;
ret = dst_entries_init(&ip6_dst_blackhole_ops);
if (ret)
goto out_kmem_cache;
ret = register_pernet_subsys(&ip6_route_net_ops);
if (ret)
goto out_dst_entries;
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
/* Registration of the loopback device happens before this portion of
 * code runs, so the loopback reference in rt6_info will not have been
 * taken; do it manually for init_net. */
init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
ret = fib6_init();
if (ret)
goto out_register_subsys;
ret = xfrm6_init();
if (ret)
goto out_fib6_init;
ret = fib6_rules_init();
if (ret)
goto xfrm6_init;
ret = register_pernet_subsys(&ip6_route_net_late_ops);
if (ret)
goto fib6_rules_init;
ret = -ENOBUFS;
if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
__rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
__rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
goto out_register_late_subsys;
ret = register_netdevice_notifier(&ip6_route_dev_notifier);
if (ret)
goto out_register_late_subsys;
out:
return ret;
out_register_late_subsys:
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
fib6_rules_cleanup();
xfrm6_init:
xfrm6_fini();
out_fib6_init:
fib6_gc_cleanup();
out_register_subsys:
unregister_pernet_subsys(&ip6_route_net_ops);
out_dst_entries:
dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
goto out;
}
void ip6_route_cleanup(void)
{
unregister_netdevice_notifier(&ip6_route_dev_notifier);
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_cleanup();
xfrm6_fini();
fib6_gc_cleanup();
unregister_pernet_subsys(&ip6_route_net_ops);
dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}
| gpl-2.0 |
mobile-event-processing/Asper | source/src/com/espertech/esper/epl/subquery/package.html | 853 | <!--
~ **************************************************************************************
~ * Copyright (C) 2008 EsperTech, Inc. All rights reserved. *
~ * http://esper.codehaus.org *
~ * http://www.espertech.com *
~ * ---------------------------------------------------------------------------------- *
~ * The software in this package is published under the terms of the GPL license *
~ * a copy of which has been included with this distribution in the license.txt file. *
~ **************************************************************************************
-->
<html>
<head></head>
<body>
<p>
Subquery helper classes are found here.
</p>
</body>
</html>
| gpl-2.0 |
MegabytePhreak/qemu-mcf5307 | target-arm/neon_helper.c | 53355 | /*
* ARM NEON vector operations.
*
* Copyright (c) 2007, 2008 CodeSourcery.
* Written by Paul Brook
*
* This code is licensed under the GNU GPL v2.
*/
#include <stdlib.h>
#include <stdio.h>
#include "cpu.h"
#include "exec.h"
#include "helper.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
#define NFS (&env->vfp.standard_fp_status)
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
type v1; \
} neon_##name;
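/* On big-endian hosts the element fields are declared in reverse order
 * so that v1 always maps to the least significant element of the packed
 * 32-bit value used by NEON_UNPACK/NEON_PACK below. */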
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
type v2; \
type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
type v4; \
type v3; \
type v2; \
type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
type v1; \
type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
type v1; \
type v2; \
type v3; \
type v4; \
} neon_##name;
#endif
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1
/* Copy from a uint32_t to a vector structure type. */
#define NEON_UNPACK(vtype, dest, val) do { \
union { \
vtype v; \
uint32_t i; \
} conv_u; \
conv_u.i = (val); \
dest = conv_u.v; \
} while(0)
/* Copy from a vector structure type to a uint32_t. */
#define NEON_PACK(vtype, dest, val) do { \
union { \
vtype v; \
uint32_t i; \
} conv_u; \
conv_u.v = (val); \
dest = conv_u.i; \
} while(0)
#define NEON_DO1 \
NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
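/* NEON_VOP expands to a helper that unpacks both 32-bit operands into
 * the given vector struct type, applies NEON_FN to each pair of
 * elements, and packs the per-element results back into a uint32_t. */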
#define NEON_VOP_BODY(vtype, n) \
{ \
uint32_t res; \
vtype vsrc1; \
vtype vsrc2; \
vtype vdest; \
NEON_UNPACK(vtype, vsrc1, arg1); \
NEON_UNPACK(vtype, vsrc2, arg2); \
NEON_DO##n; \
NEON_PACK(vtype, res, vdest); \
return res; \
}
#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
the elementwise and pairwise operations are the same. */
#define NEON_PDO2 \
NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);
#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
uint32_t res; \
vtype vsrc1; \
vtype vsrc2; \
vtype vdest; \
NEON_UNPACK(vtype, vsrc1, arg1); \
NEON_UNPACK(vtype, vsrc2, arg2); \
NEON_PDO##n; \
NEON_PACK(vtype, res, vdest); \
return res; \
}
/* Unary operators. */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
vtype vsrc1; \
vtype vdest; \
NEON_UNPACK(vtype, vsrc1, arg); \
NEON_DO##n; \
NEON_PACK(vtype, arg, vdest); \
return arg; \
}
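/* Unsigned saturating add: the sum is computed in 32 bits and the cast
 * back to the element type detects overflow past the element width. */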
#define NEON_USAT(dest, src1, src2, type) do { \
uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
if (tmp != (type)tmp) { \
SET_QC(); \
dest = ~0; \
} else { \
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qadd_u32)(uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (res < a) {
SET_QC();
res = ~0;
}
return res;
}
uint64_t HELPER(neon_qadd_u64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
res = src1 + src2;
if (res < src1) {
SET_QC();
res = ~(uint64_t)0;
}
return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
if (tmp != (type)tmp) { \
SET_QC(); \
if (src2 > 0) { \
tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
} else { \
tmp = 1 << (sizeof(type) * 8 - 1); \
} \
} \
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qadd_s32)(uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
SET_QC();
res = ~(((int32_t)a >> 31) ^ SIGNBIT);
}
return res;
}
uint64_t HELPER(neon_qadd_s64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
res = src1 + src2;
if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
SET_QC();
res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
}
return res;
}
#define NEON_USAT(dest, src1, src2, type) do { \
uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
if (tmp != (type)tmp) { \
SET_QC(); \
dest = 0; \
} else { \
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qsub_u32)(uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (res > a) {
SET_QC();
res = 0;
}
return res;
}
uint64_t HELPER(neon_qsub_u64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
if (src1 < src2) {
SET_QC();
res = 0;
} else {
res = src1 - src2;
}
return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
if (tmp != (type)tmp) { \
SET_QC(); \
if (src2 < 0) { \
tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
} else { \
tmp = 1 << (sizeof(type) * 8 - 1); \
} \
} \
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qsub_s32)(uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
SET_QC();
res = ~(((int32_t)a >> 31) ^ SIGNBIT);
}
return res;
}
uint64_t HELPER(neon_qsub_s64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
res = src1 - src2;
if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
SET_QC();
res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
}
return res;
}
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
int32_t dest;
dest = (src1 >> 1) + (src2 >> 1);
if (src1 & src2 & 1)
dest++;
return dest;
}
uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
uint32_t dest;
dest = (src1 >> 1) + (src2 >> 1);
if (src1 & src2 & 1)
dest++;
return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
int32_t dest;
dest = (src1 >> 1) + (src2 >> 1);
if ((src1 | src2) & 1)
dest++;
return dest;
}
uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
uint32_t dest;
dest = (src1 >> 1) + (src2 >> 1);
if ((src1 | src2) & 1)
dest++;
return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
int32_t dest;
dest = (src1 >> 1) - (src2 >> 1);
if ((~src1) & src2 & 1)
dest--;
return dest;
}
uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
uint32_t dest;
dest = (src1 >> 1) - (src2 >> 1);
if ((~src1) & src2 & 1)
dest--;
return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8 || \
tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp < 0) { \
dest = src1 >> -tmp; \
} else { \
dest = src1 << tmp; \
}} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64 || shift <= -64) {
val = 0;
} else if (shift < 0) {
val >>= -shift;
} else {
val <<= shift;
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = src1 >> (sizeof(src1) * 8 - 1); \
} else if (tmp < 0) { \
dest = src1 >> -tmp; \
} else { \
dest = src1 << tmp; \
}} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN
uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
int64_t val = valop;
if (shift >= 64) {
val = 0;
} else if (shift <= -64) {
val >>= 63;
} else if (shift < 0) {
val >>= -shift;
} else {
val <<= shift;
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if ((tmp >= (ssize_t)sizeof(src1) * 8) \
|| (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
dest = 0; \
} else if (tmp < 0) { \
dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
} else { \
dest = src1 << tmp; \
}} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64-bit accumulator. */
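/* For example, with a shift of -1 the rounding constant is 1, so the
 * result is (val + 1) >> 1. */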
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
int32_t dest;
int32_t val = (int32_t)valop;
int8_t shift = (int8_t)shiftop;
if ((shift >= 32) || (shift <= -32)) {
dest = 0;
} else if (shift < 0) {
int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
dest = big_dest >> -shift;
} else {
dest = val << shift;
}
return dest;
}
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
int64_t val = valop;
if ((shift >= 64) || (shift <= -64)) {
val = 0;
} else if (shift < 0) {
val >>= (-shift - 1);
if (val == INT64_MAX) {
/* In this case, it means that the rounding constant is 1,
* and the addition would overflow. Return the actual
* result directly. */
val = 0x4000000000000000LL;
} else {
val++;
val >>= 1;
}
} else {
val <<= shift;
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8 || \
tmp < -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
dest = src1 >> (-tmp - 1); \
} else if (tmp < 0) { \
dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
} else { \
dest = src1 << tmp; \
}} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64-bit accumulator. */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
uint32_t dest;
int8_t shift = (int8_t)shiftop;
if (shift >= 32 || shift < -32) {
dest = 0;
} else if (shift == -32) {
dest = val >> 31;
} else if (shift < 0) {
uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
dest = big_dest >> -shift;
} else {
dest = val << shift;
}
return dest;
}
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
if (shift >= 64 || shift < -64) {
val = 0;
} else if (shift == -64) {
/* Rounding a 1-bit result just preserves that bit. */
val >>= 63;
} else if (shift < 0) {
val >>= (-shift - 1);
if (val == UINT64_MAX) {
/* In this case, it means that the rounding constant is 1,
* and the addition would overflow. Return the actual
* result directly. */
val = 0x8000000000000000ULL;
} else {
val++;
val >>= 1;
}
} else {
val <<= shift;
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
if (src1) { \
SET_QC(); \
dest = ~0; \
} else { \
dest = 0; \
} \
} else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp < 0) { \
dest = src1 >> -tmp; \
} else { \
dest = src1 << tmp; \
if ((dest >> tmp) != src1) { \
SET_QC(); \
dest = ~0; \
} \
}} while (0)
NEON_VOP(qshl_u8, neon_u8, 4)
NEON_VOP(qshl_u16, neon_u16, 2)
NEON_VOP(qshl_u32, neon_u32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
if (val) {
val = ~(uint64_t)0;
SET_QC();
}
} else if (shift <= -64) {
val = 0;
} else if (shift < 0) {
val >>= -shift;
} else {
uint64_t tmp = val;
val <<= shift;
if ((val >> shift) != tmp) {
SET_QC();
val = ~(uint64_t)0;
}
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
if (src1) { \
SET_QC(); \
dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
if (src1 > 0) { \
dest--; \
} \
} else { \
dest = src1; \
} \
} else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = src1 >> 31; \
} else if (tmp < 0) { \
dest = src1 >> -tmp; \
} else { \
dest = src1 << tmp; \
if ((dest >> tmp) != src1) { \
SET_QC(); \
dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
if (src1 > 0) { \
dest--; \
} \
} \
}} while (0)
NEON_VOP(qshl_s8, neon_s8, 4)
NEON_VOP(qshl_s16, neon_s16, 2)
NEON_VOP(qshl_s32, neon_s32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
if (shift >= 64) {
if (val) {
SET_QC();
val = (val >> 63) ^ ~SIGNBIT64;
}
} else if (shift <= -64) {
val >>= 63;
} else if (shift < 0) {
val >>= -shift;
} else {
int64_t tmp = val;
val <<= shift;
if ((val >> shift) != tmp) {
SET_QC();
val = (tmp >> 63) ^ ~SIGNBIT64;
}
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
SET_QC(); \
dest = 0; \
} else { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
if (src1) { \
SET_QC(); \
dest = ~0; \
} else { \
dest = 0; \
} \
} else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp < 0) { \
dest = src1 >> -tmp; \
} else { \
dest = src1 << tmp; \
if ((dest >> tmp) != src1) { \
SET_QC(); \
dest = ~0; \
} \
} \
}} while (0)
NEON_VOP(qshlu_s8, neon_u8, 4)
NEON_VOP(qshlu_s16, neon_u16, 2)
#undef NEON_FN
uint32_t HELPER(neon_qshlu_s32)(uint32_t valop, uint32_t shiftop)
{
if ((int32_t)valop < 0) {
SET_QC();
return 0;
}
return helper_neon_qshl_u32(valop, shiftop);
}
uint64_t HELPER(neon_qshlu_s64)(uint64_t valop, uint64_t shiftop)
{
if ((int64_t)valop < 0) {
SET_QC();
return 0;
}
return helper_neon_qshl_u64(valop, shiftop);
}
/* FIXME: This is wrong. */
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
if (src1) { \
SET_QC(); \
dest = ~0; \
} else { \
dest = 0; \
} \
} else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
dest = src1 >> (sizeof(src1) * 8 - 1); \
} else if (tmp < 0) { \
dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
} else { \
dest = src1 << tmp; \
if ((dest >> tmp) != src1) { \
SET_QC(); \
dest = ~0; \
} \
}} while (0)
NEON_VOP(qrshl_u8, neon_u8, 4)
NEON_VOP(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64-bit accumulator. */
uint32_t HELPER(neon_qrshl_u32)(uint32_t val, uint32_t shiftop)
{
uint32_t dest;
int8_t shift = (int8_t)shiftop;
if (shift >= 32) {
if (val) {
SET_QC();
dest = ~0;
} else {
dest = 0;
}
} else if (shift < -32) {
dest = 0;
} else if (shift == -32) {
dest = val >> 31;
} else if (shift < 0) {
uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
dest = big_dest >> -shift;
} else {
dest = val << shift;
if ((dest >> shift) != val) {
SET_QC();
dest = ~0;
}
}
return dest;
}
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
uint64_t HELPER(neon_qrshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
if (val) {
SET_QC();
val = ~0;
}
} else if (shift < -64) {
val = 0;
} else if (shift == -64) {
val >>= 63;
} else if (shift < 0) {
val >>= (-shift - 1);
if (val == UINT64_MAX) {
/* In this case, it means that the rounding constant is 1,
* and the addition would overflow. Return the actual
* result directly. */
val = 0x8000000000000000ULL;
} else {
val++;
val >>= 1;
}
} else {
uint64_t tmp = val;
val <<= shift;
if ((val >> shift) != tmp) {
SET_QC();
val = ~0;
}
}
return val;
}
#define NEON_FN(dest, src1, src2) do { \
int8_t tmp; \
tmp = (int8_t)src2; \
if (tmp >= (ssize_t)sizeof(src1) * 8) { \
if (src1) { \
SET_QC(); \
dest = (1 << (sizeof(src1) * 8 - 1)); \
if (src1 > 0) { \
dest--; \
} \
} else { \
dest = 0; \
} \
} else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp < 0) { \
dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
} else { \
dest = src1 << tmp; \
if ((dest >> tmp) != src1) { \
SET_QC(); \
dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
if (src1 > 0) { \
dest--; \
} \
} \
}} while (0)
NEON_VOP(qrshl_s8, neon_s8, 4)
NEON_VOP(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64-bit accumulator. */
uint32_t HELPER(neon_qrshl_s32)(uint32_t valop, uint32_t shiftop)
{
int32_t dest;
int32_t val = (int32_t)valop;
int8_t shift = (int8_t)shiftop;
if (shift >= 32) {
if (val) {
SET_QC();
dest = (val >> 31) ^ ~SIGNBIT;
} else {
dest = 0;
}
} else if (shift <= -32) {
dest = 0;
} else if (shift < 0) {
int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
dest = big_dest >> -shift;
} else {
dest = val << shift;
if ((dest >> shift) != val) {
SET_QC();
dest = (val >> 31) ^ ~SIGNBIT;
}
}
return dest;
}
/* Handling addition overflow with 64-bit input values is trickier
 * than with 32-bit values. */
uint64_t HELPER(neon_qrshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
if (shift >= 64) {
if (val) {
SET_QC();
val = (val >> 63) ^ ~SIGNBIT64;
}
} else if (shift <= -64) {
val = 0;
} else if (shift < 0) {
val >>= (-shift - 1);
if (val == INT64_MAX) {
/* In this case, it means that the rounding constant is 1,
* and the addition would overflow. Return the actual
* result directly. */
val = 0x4000000000000000ULL;
} else {
val++;
val >>= 1;
}
} else {
int64_t tmp = val;
val <<= shift;
if ((val >> shift) != tmp) {
SET_QC();
val = (tmp >> 63) ^ ~SIGNBIT64;
}
}
return val;
}
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
uint32_t mask;
mask = (a ^ b) & 0x80808080u;
a &= ~0x80808080u;
b &= ~0x80808080u;
return (a + b) ^ mask;
}
uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
uint32_t mask;
mask = (a ^ b) & 0x80008000u;
a &= ~0x80008000u;
b &= ~0x80008000u;
return (a + b) ^ mask;
}
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN
/* Polynomial multiplication is like integer multiplication except the
partial products are XORed, not added. */
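/* For example, 0x03 * 0x03 = (x + 1) * (x + 1) = x^2 + 1 = 0x05 here,
 * whereas an ordinary integer multiply would give 0x09. */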
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
uint32_t mask;
uint32_t result;
result = 0;
while (op1) {
mask = 0;
if (op1 & 1)
mask |= 0xff;
if (op1 & (1 << 8))
mask |= (0xff << 8);
if (op1 & (1 << 16))
mask |= (0xff << 16);
if (op1 & (1 << 24))
mask |= (0xff << 24);
result ^= op2 & mask;
op1 = (op1 >> 1) & 0x7f7f7f7f;
op2 = (op2 << 1) & 0xfefefefe;
}
return result;
}
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
uint64_t result = 0;
uint64_t mask;
uint64_t op2ex = op2;
op2ex = (op2ex & 0xff) |
((op2ex & 0xff00) << 8) |
((op2ex & 0xff0000) << 16) |
((op2ex & 0xff000000) << 24);
while (op1) {
mask = 0;
if (op1 & 1) {
mask |= 0xffff;
}
if (op1 & (1 << 8)) {
mask |= (0xffffU << 16);
}
if (op1 & (1 << 16)) {
mask |= (0xffffULL << 32);
}
if (op1 & (1 << 24)) {
mask |= (0xffffULL << 48);
}
result ^= op2ex & mask;
op1 = (op1 >> 1) & 0x7f7f7f7f;
op2ex <<= 1;
}
return result;
}
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN
/* Count Leading Sign/Zero Bits. */
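/* do_clz8/do_clz16 shift the value right until it reaches zero; each
 * iteration accounts for one significant bit, so what remains of the
 * initial count is the number of leading zeros. */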
static inline int do_clz8(uint8_t x)
{
int n;
for (n = 8; x; n--)
x >>= 1;
return n;
}
static inline int do_clz16(uint16_t x)
{
int n;
for (n = 16; x; n--)
x >>= 1;
return n;
}
#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN
uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
int count;
if ((int32_t)x < 0)
x = ~x;
for (count = 32; x; count--)
x = x >> 1;
return count - 1;
}
/* Bit count. */
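/* Classic SWAR popcount: sum adjacent 1-bit fields, then 2-bit fields,
 * then 4-bit fields, leaving an independent bit count in each byte. */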
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
return x;
}
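/* Saturating doubling multiply returning the high half: the 32-bit
 * product is doubled (saturating with QC set on overflow); the rounding
 * variant then adds 1 << 15 before taking bits [31:16]. */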
#define NEON_QDMULH16(dest, src1, src2, round) do { \
uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
SET_QC(); \
tmp = (tmp >> 31) ^ ~SIGNBIT; \
} else { \
tmp <<= 1; \
} \
if (round) { \
int32_t old = tmp; \
tmp += 1 << 15; \
if ((int32_t)tmp < old) { \
SET_QC(); \
tmp = SIGNBIT - 1; \
} \
} \
dest = tmp >> 16; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
#define NEON_QDMULH32(dest, src1, src2, round) do { \
uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
SET_QC(); \
tmp = (tmp >> 63) ^ ~SIGNBIT64; \
} else { \
tmp <<= 1; \
} \
if (round) { \
int64_t old = tmp; \
tmp += (int64_t)1 << 31; \
if ((int64_t)tmp < old) { \
SET_QC(); \
tmp = SIGNBIT64 - 1; \
} \
} \
dest = tmp >> 32; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
| ((x >> 24) & 0xff000000u);
}
uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}
uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
| ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}
uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
x &= 0xff80ff80ff80ff80ull;
x += 0x0080008000800080ull;
return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
| ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}
uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
x &= 0xffff8000ffff8000ull;
x += 0x0000800000008000ull;
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
uint32_t HELPER(neon_unarrow_sat8)(uint64_t x)
{
uint16_t s;
uint8_t d;
uint32_t res = 0;
#define SAT8(n) \
s = x >> n; \
if (s & 0x8000) { \
SET_QC(); \
} else { \
if (s > 0xff) { \
d = 0xff; \
SET_QC(); \
} else { \
d = s; \
} \
res |= (uint32_t)d << (n / 2); \
}
SAT8(0);
SAT8(16);
SAT8(32);
SAT8(48);
#undef SAT8
return res;
}
uint32_t HELPER(neon_narrow_sat_u8)(uint64_t x)
{
uint16_t s;
uint8_t d;
uint32_t res = 0;
#define SAT8(n) \
s = x >> n; \
if (s > 0xff) { \
d = 0xff; \
SET_QC(); \
} else { \
d = s; \
} \
res |= (uint32_t)d << (n / 2);
SAT8(0);
SAT8(16);
SAT8(32);
SAT8(48);
#undef SAT8
return res;
}
uint32_t HELPER(neon_narrow_sat_s8)(uint64_t x)
{
int16_t s;
uint8_t d;
uint32_t res = 0;
#define SAT8(n) \
s = x >> n; \
if (s != (int8_t)s) { \
d = (s >> 15) ^ 0x7f; \
SET_QC(); \
} else { \
d = s; \
} \
res |= (uint32_t)d << (n / 2);
SAT8(0);
SAT8(16);
SAT8(32);
SAT8(48);
#undef SAT8
return res;
}
uint32_t HELPER(neon_unarrow_sat16)(uint64_t x)
{
uint32_t high;
uint32_t low;
low = x;
if (low & 0x80000000) {
low = 0;
SET_QC();
} else if (low > 0xffff) {
low = 0xffff;
SET_QC();
}
high = x >> 32;
if (high & 0x80000000) {
high = 0;
SET_QC();
} else if (high > 0xffff) {
high = 0xffff;
SET_QC();
}
return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_u16)(uint64_t x)
{
uint32_t high;
uint32_t low;
low = x;
if (low > 0xffff) {
low = 0xffff;
SET_QC();
}
high = x >> 32;
if (high > 0xffff) {
high = 0xffff;
SET_QC();
}
return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_s16)(uint64_t x)
{
int32_t low;
int32_t high;
low = x;
if (low != (int16_t)low) {
low = (low >> 31) ^ 0x7fff;
SET_QC();
}
high = x >> 32;
if (high != (int16_t)high) {
high = (high >> 31) ^ 0x7fff;
SET_QC();
}
return (uint16_t)low | (high << 16);
}
uint32_t HELPER(neon_unarrow_sat32)(uint64_t x)
{
if (x & 0x8000000000000000ull) {
SET_QC();
return 0;
}
if (x > 0xffffffffu) {
SET_QC();
return 0xffffffffu;
}
return x;
}
uint32_t HELPER(neon_narrow_sat_u32)(uint64_t x)
{
if (x > 0xffffffffu) {
SET_QC();
return 0xffffffffu;
}
return x;
}
uint32_t HELPER(neon_narrow_sat_s32)(uint64_t x)
{
if ((int64_t)x != (int32_t)x) {
SET_QC();
return ((int64_t)x >> 63) ^ 0x7fffffff;
}
return x;
}
uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
uint64_t tmp;
uint64_t ret;
ret = (uint8_t)x;
tmp = (uint8_t)(x >> 8);
ret |= tmp << 16;
tmp = (uint8_t)(x >> 16);
ret |= tmp << 32;
tmp = (uint8_t)(x >> 24);
ret |= tmp << 48;
return ret;
}
uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
uint64_t tmp;
uint64_t ret;
ret = (uint16_t)(int8_t)x;
tmp = (uint16_t)(int8_t)(x >> 8);
ret |= tmp << 16;
tmp = (uint16_t)(int8_t)(x >> 16);
ret |= tmp << 32;
tmp = (uint16_t)(int8_t)(x >> 24);
ret |= tmp << 48;
return ret;
}
uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
uint64_t high = (uint16_t)(x >> 16);
return ((uint16_t)x) | (high << 32);
}
uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
uint64_t high = (int16_t)(x >> 16);
return ((uint32_t)(int16_t)x) | (high << 32);
}
uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
uint64_t mask;
mask = (a ^ b) & 0x8000800080008000ull;
a &= ~0x8000800080008000ull;
b &= ~0x8000800080008000ull;
return (a + b) ^ mask;
}
uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
uint64_t mask;
mask = (a ^ b) & 0x8000000080000000ull;
a &= ~0x8000000080000000ull;
b &= ~0x8000000080000000ull;
return (a + b) ^ mask;
}
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
uint64_t tmp;
uint64_t tmp2;
tmp = a & 0x0000ffff0000ffffull;
tmp += (a >> 16) & 0x0000ffff0000ffffull;
tmp2 = b & 0xffff0000ffff0000ull;
tmp2 += (b << 16) & 0xffff0000ffff0000ull;
return ( tmp & 0xffff)
| ((tmp >> 16) & 0xffff0000ull)
| ((tmp2 << 16) & 0xffff00000000ull)
| ( tmp2 & 0xffff000000000000ull);
}
uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
uint32_t low = a + (a >> 32);
uint32_t high = b + (b >> 32);
return low + ((uint64_t)high << 32);
}
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
uint64_t mask;
mask = (a ^ ~b) & 0x8000800080008000ull;
a |= 0x8000800080008000ull;
b &= ~0x8000800080008000ull;
return (a - b) ^ mask;
}
uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
uint64_t mask;
mask = (a ^ ~b) & 0x8000000080000000ull;
a |= 0x8000000080000000ull;
b &= ~0x8000000080000000ull;
return (a - b) ^ mask;
}
uint64_t HELPER(neon_addl_saturate_s32)(uint64_t a, uint64_t b)
{
uint32_t x, y;
uint32_t low, high;
x = a;
y = b;
low = x + y;
if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
SET_QC();
low = ((int32_t)x >> 31) ^ ~SIGNBIT;
}
x = a >> 32;
y = b >> 32;
high = x + y;
if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
SET_QC();
high = ((int32_t)x >> 31) ^ ~SIGNBIT;
}
return low | ((uint64_t)high << 32);
}
uint64_t HELPER(neon_addl_saturate_s64)(uint64_t a, uint64_t b)
{
uint64_t result;
result = a + b;
if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
SET_QC();
result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
}
return result;
}
/* We have to do the arithmetic in a larger type than
* the input type because, for example, with a signed 32-bit
* op the absolute difference can overflow a signed 32-bit value.
*/
#define DO_ABD(dest, x, y, intype, arithtype) do { \
arithtype tmp_x = (intype)(x); \
arithtype tmp_y = (intype)(y); \
dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
} while(0)
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_ABD(result, a, b, uint8_t, uint32_t);
DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
result |= tmp << 16;
DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
result |= tmp << 32;
DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
result |= tmp << 48;
return result;
}
uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_ABD(result, a, b, int8_t, int32_t);
DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
result |= tmp << 16;
DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
result |= tmp << 32;
DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
result |= tmp << 48;
return result;
}
uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_ABD(result, a, b, uint16_t, uint32_t);
DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_ABD(result, a, b, int16_t, int32_t);
DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
uint64_t result;
DO_ABD(result, a, b, uint32_t, uint64_t);
return result;
}
uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
uint64_t result;
DO_ABD(result, a, b, int32_t, int64_t);
return result;
}
#undef DO_ABD
/* Widening multiply. Named type is the source type. */
#define DO_MULL(dest, x, y, type1, type2) do { \
type1 tmp_x = x; \
type1 tmp_y = y; \
dest = (type2)((type2)tmp_x * (type2)tmp_y); \
} while(0)
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_MULL(result, a, b, uint8_t, uint16_t);
DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
result |= tmp << 16;
DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
result |= tmp << 32;
DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
result |= tmp << 48;
return result;
}
uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_MULL(result, a, b, int8_t, uint16_t);
DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
result |= tmp << 16;
DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
result |= tmp << 32;
DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
result |= tmp << 48;
return result;
}
uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_MULL(result, a, b, uint16_t, uint32_t);
DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
return result | (tmp << 32);
}
uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
uint64_t tmp;
uint64_t result;
DO_MULL(result, a, b, int16_t, uint32_t);
DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
return result | (tmp << 32);
}
uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
uint16_t tmp;
uint64_t result;
result = (uint16_t)-x;
tmp = -(x >> 16);
result |= (uint64_t)tmp << 16;
tmp = -(x >> 32);
result |= (uint64_t)tmp << 32;
tmp = -(x >> 48);
result |= (uint64_t)tmp << 48;
return result;
}
uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
uint32_t low = -x;
uint32_t high = -(x >> 32);
return low | ((uint64_t)high << 32);
}
/* FIXME: There should be a native op for this. */
uint64_t HELPER(neon_negl_u64)(uint64_t x)
{
return -x;
}
/* Saturating sign manipulation. */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
if (x == (int8_t)0x80) { \
x = 0x7f; \
SET_QC(); \
} else if (x < 0) { \
x = -x; \
}} while (0)
uint32_t HELPER(neon_qabs_s8)(uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
DO_QABS8(vec.v1);
DO_QABS8(vec.v2);
DO_QABS8(vec.v3);
DO_QABS8(vec.v4);
NEON_PACK(neon_s8, x, vec);
return x;
}
#undef DO_QABS8
#define DO_QNEG8(x) do { \
if (x == (int8_t)0x80) { \
x = 0x7f; \
SET_QC(); \
} else { \
x = -x; \
}} while (0)
uint32_t HELPER(neon_qneg_s8)(uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
DO_QNEG8(vec.v1);
DO_QNEG8(vec.v2);
DO_QNEG8(vec.v3);
DO_QNEG8(vec.v4);
NEON_PACK(neon_s8, x, vec);
return x;
}
#undef DO_QNEG8
#define DO_QABS16(x) do { \
if (x == (int16_t)0x8000) { \
x = 0x7fff; \
SET_QC(); \
} else if (x < 0) { \
x = -x; \
}} while (0)
uint32_t HELPER(neon_qabs_s16)(uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
DO_QABS16(vec.v1);
DO_QABS16(vec.v2);
NEON_PACK(neon_s16, x, vec);
return x;
}
#undef DO_QABS16
#define DO_QNEG16(x) do { \
if (x == (int16_t)0x8000) { \
x = 0x7fff; \
SET_QC(); \
} else { \
x = -x; \
}} while (0)
uint32_t HELPER(neon_qneg_s16)(uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
DO_QNEG16(vec.v1);
DO_QNEG16(vec.v2);
NEON_PACK(neon_s16, x, vec);
return x;
}
#undef DO_QNEG16
uint32_t HELPER(neon_qabs_s32)(uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
x = ~SIGNBIT;
} else if ((int32_t)x < 0) {
x = -x;
}
return x;
}
uint32_t HELPER(neon_qneg_s32)(uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
x = ~SIGNBIT;
} else {
x = -x;
}
return x;
}
/* NEON Float helpers. */
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
{
return float32_val(float32_min(make_float32(a), make_float32(b), NFS));
}
uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
{
return float32_val(float32_max(make_float32(a), make_float32(b), NFS));
}
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
{
float32 f0 = make_float32(a);
float32 f1 = make_float32(b);
return float32_val(float32_abs(float32_sub(f0, f1, NFS)));
}
uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
{
return float32_val(float32_add(make_float32(a), make_float32(b), NFS));
}
uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
{
return float32_val(float32_sub(make_float32(a), make_float32(b), NFS));
}
uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
{
return float32_val(float32_mul(make_float32(a), make_float32(b), NFS));
}
/* Floating point comparisons produce an integer result. */
#define NEON_VOP_FCMP(name, ok) \
uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
{ \
switch (float32_compare_quiet(make_float32(a), make_float32(b), NFS)) { \
ok return ~0; \
default: return 0; \
} \
}
NEON_VOP_FCMP(ceq_f32, case float_relation_equal:)
NEON_VOP_FCMP(cge_f32, case float_relation_equal: case float_relation_greater:)
NEON_VOP_FCMP(cgt_f32, case float_relation_greater:)
uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
{
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
switch (float32_compare_quiet(f0, f1, NFS)) {
case float_relation_equal:
case float_relation_greater:
return ~0;
default:
return 0;
}
}
uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
{
float32 f0 = float32_abs(make_float32(a));
float32 f1 = float32_abs(make_float32(b));
if (float32_compare_quiet(f0, f1, NFS) == float_relation_greater) {
return ~0;
}
return 0;
}
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
void HELPER(neon_qunzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
| (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
| (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
| (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
| (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
| (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
| (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
| (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
| (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
| (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
| (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
| (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
| (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
| (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
| (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
| (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
| (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip32)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_unzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
| (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
| (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
| (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
| (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
| (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
| (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_unzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
| (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
| (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_qzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
| (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
| (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
| (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
| (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
| (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
| (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
| (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
| (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
| (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
| (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
| (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
| (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
| (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
| (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
| (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
| (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip32)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
uint64_t zd0 = float64_val(env->vfp.regs[rd]);
uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rm + 1] = make_float64(m1);
env->vfp.regs[rd] = make_float64(d0);
env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_zip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
| (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
| (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
| (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
| (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
| (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
| (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_zip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
| (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
| (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
env->vfp.regs[rm] = make_float64(m0);
env->vfp.regs[rd] = make_float64(d0);
}
| gpl-2.0 |
miguelangelramirez/magento.dev | app/code/core/Mage/CurrencySymbol/Helper/Data.php | 1841 | <?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_CurrencySymbol
* @copyright Copyright (c) 2006-2017 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
* Currency Symbol helper
*
* @category Mage
* @package Mage_CurrencySymbol
* @author Magento Core Team <[email protected]>
*/
class Mage_CurrencySymbol_Helper_Data extends Mage_Core_Helper_Data
{
/**
* Get currency display options
*
* @param string $baseCode
* @return array
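* Example (hypothetical value): if a custom symbol '$' is configured for
* 'USD', getCurrencyOptions('USD') returns
* array('symbol' => '$', 'display' => Zend_Currency::USE_SYMBOL);
* otherwise an empty array is returned.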
*/
public function getCurrencyOptions($baseCode)
{
$currencyOptions = array();
$currencySymbol = Mage::getModel('currencysymbol/system_currencysymbol');
if($currencySymbol) {
$customCurrencySymbol = $currencySymbol->getCurrencySymbol($baseCode);
if ($customCurrencySymbol) {
$currencyOptions['symbol'] = $customCurrencySymbol;
$currencyOptions['display'] = Zend_Currency::USE_SYMBOL;
}
}
return $currencyOptions;
}
}
| gpl-2.0 |
xoox/linux-2.6.18_pro500 | mvl_patches/pro50-1481.c | 447 | /*
* Author: MontaVista Software, Inc. <[email protected]>
*
* 2008 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/init.h>
#include <linux/mvl_patch.h>
static __init int regpatch(void)
{
return mvl_register_patch(1481);
}
module_init(regpatch);
| gpl-2.0 |