-
-
-
-
`
-
- createCollapsibles(taskEntry)
-
- task['numOutputsTotal'] = numOutputsTotal
- task['taskStatusLabel'] = taskEntry.querySelector('.taskStatusLabel')
- task['outputContainer'] = taskEntry.querySelector('.img-preview')
- task['outputMsg'] = taskEntry.querySelector('.outputMsg')
- task['previewPrompt'] = taskEntry.querySelector('.preview-prompt')
- task['progressBar'] = taskEntry.querySelector('.progressBar')
- task['stopTask'] = taskEntry.querySelector('.stopTask')
-
- task['stopTask'].addEventListener('click', async function() {
- if (task['isProcessing']) {
- task.isProcessing = false
- try {
- let res = await fetch('image/stop')
- } catch (e) {
- console.trace(e)
- }
- } else {
- let idx = taskQueue.indexOf(task)
- if (idx >= 0) {
- taskQueue.splice(idx, 1)
- }
-
- taskEntry.remove()
- }
- })
-
- imagePreview.insertBefore(taskEntry, previewTools.nextSibling)
-
- task['previewPrompt'].innerText = prompt
-
- taskQueue.unshift(task)
-}
-
-// create a file name with embedded prompt and metadata
-// for easier cateloging and comparison
-function createFileName(seed, outputFormat) {
-
- // Most important information is the prompt
- let underscoreName = lastPromptUsed.replace(/[^a-zA-Z0-9]/g, '_')
- underscoreName = underscoreName.substring(0, 100)
- const steps = numInferenceStepsField.value
- const guidance = guidanceScaleField.value
-
- // name and the top level metadata
- let fileName = `${underscoreName}_Seed-${seed}_Steps-${steps}_Guidance-${guidance}`
-
- // add the tags
- // let tags = []
- // let tagString = ''
- // document.querySelectorAll(modifyTagsSelector).forEach(function(tag) {
- // tags.push(tag.innerHTML)
- // })
-
- // join the tags with a pipe
- // if (activeTags.length > 0) {
- // tagString = '_Tags-'
- // tagString += tags.join('|')
- // }
-
- // // append empty or populated tags
- // fileName += `${tagString}`
-
- // add the file extension
- fileName += '.' + (outputFormat === 'png' ? 'png' : 'jpeg')
-
- return fileName
-}
-
-async function stopAllTasks() {
- taskQueue.forEach(task => {
- task.isProcessing = false
- })
- taskQueue = []
-
- if (currentTask !== null) {
- currentTask.isProcessing = false
- }
-
- try {
- let res = await fetch('image/stop')
- } catch (e) {
- console.trace(e)
- }
-}
-
-clearAllPreviewsBtn.addEventListener('click', async function() {
- await stopAllTasks()
-
- let taskEntries = document.querySelectorAll('.imageTaskContainer')
- taskEntries.forEach(task => {
- task.remove()
- })
-
- previewTools.style.display = 'none'
- initialText.style.display = 'block'
-})
-
-stopImageBtn.addEventListener('click', async function() {
- await stopAllTasks()
-})
-
-soundToggle.addEventListener('click', handleBoolSettingChange(SOUND_ENABLED_KEY))
-soundToggle.checked = isSoundEnabled()
-
-saveToDiskField.checked = isSaveToDiskEnabled()
-diskPathField.disabled = !saveToDiskField.checked
-
-useFaceCorrectionField.addEventListener('click', handleBoolSettingChange(USE_FACE_CORRECTION_KEY))
-useFaceCorrectionField.checked = isFaceCorrectionEnabled()
-
-useUpscalingField.checked = isUpscalingEnabled()
-upscaleModelField.disabled = !useUpscalingField.checked
-
-showOnlyFilteredImageField.addEventListener('click', handleBoolSettingChange(SHOW_ONLY_FILTERED_IMAGE_KEY))
-showOnlyFilteredImageField.checked = isShowOnlyFilteredImageEnabled()
-
-useCPUField.addEventListener('click', handleBoolSettingChange(USE_CPU_KEY))
-useCPUField.checked = isUseCPUEnabled()
-
-useFullPrecisionField.addEventListener('click', handleBoolSettingChange(USE_FULL_PRECISION_KEY))
-useFullPrecisionField.checked = isUseFullPrecisionEnabled()
-
-turboField.addEventListener('click', handleBoolSettingChange(USE_TURBO_MODE_KEY))
-turboField.checked = isUseTurboModeEnabled()
-
-streamImageProgressField.addEventListener('click', handleBoolSettingChange(STREAM_IMAGE_PROGRESS_KEY))
-streamImageProgressField.checked = isStreamImageProgressEnabled()
-
-diskPathField.addEventListener('change', handleStringSettingChange(DISK_PATH_KEY))
-
-saveToDiskField.addEventListener('click', function(e) {
- diskPathField.disabled = !this.checked
- handleBoolSettingChange(SAVE_TO_DISK_KEY)(e)
-})
-
-useUpscalingField.addEventListener('click', function(e) {
- upscaleModelField.disabled = !this.checked
- handleBoolSettingChange(USE_UPSCALING_KEY)(e)
-})
-
-function setPanelOpen(panelHandle) {
- let panelContents = panelHandle.nextElementSibling
- panelHandle.classList.add('active')
- panelContents.style.display = 'block'
-}
-
-if (isAdvancedPanelOpenEnabled()) {
- setPanelOpen(advancedPanelHandle)
-}
-
-if (isModifiersPanelOpenEnabled()) {
- setPanelOpen(modifiersPanelHandle)
-}
-
-makeImageBtn.addEventListener('click', makeImage)
-
-
-function updateGuidanceScale() {
- guidanceScaleField.value = guidanceScaleSlider.value / 10
-}
-
-function updateGuidanceScaleSlider() {
- if (guidanceScaleField.value < 0) {
- guidanceScaleField.value = 0
- } else if (guidanceScaleField.value > 50) {
- guidanceScaleField.value = 50
- }
-
- guidanceScaleSlider.value = guidanceScaleField.value * 10
-}
-
-guidanceScaleSlider.addEventListener('input', updateGuidanceScale)
-guidanceScaleField.addEventListener('input', updateGuidanceScaleSlider)
-updateGuidanceScale()
-
-function updatePromptStrength() {
- promptStrengthField.value = promptStrengthSlider.value / 100
-}
-
-function updatePromptStrengthSlider() {
- if (promptStrengthField.value < 0) {
- promptStrengthField.value = 0
- } else if (promptStrengthField.value > 0.99) {
- promptStrengthField.value = 0.99
- }
-
- promptStrengthSlider.value = promptStrengthField.value * 100
-}
-
-promptStrengthSlider.addEventListener('input', updatePromptStrength)
-promptStrengthField.addEventListener('input', updatePromptStrengthSlider)
-updatePromptStrength()
-
-useBetaChannelField.addEventListener('click', async function(e) {
- if (serverStatus !== 'online') {
- // logError('The server is still starting up..')
- alert('The server is still starting up..')
- e.preventDefault()
- return false
- }
-
- let updateBranch = (this.checked ? 'beta' : 'main')
-
- try {
- let res = await fetch('app_config', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({
- 'update_branch': updateBranch
- })
- })
- res = await res.json()
-
- console.log('set config status response', res)
- } catch (e) {
- console.trace('set config status error', e)
- }
-})
-
-async function getAppConfig() {
- try {
- let res = await fetch('app_config')
- config = await res.json()
-
- if (config.update_branch === 'beta') {
- useBetaChannelField.checked = true
- updateBranchLabel.innerText = "(beta)"
- }
-
- console.log('get config status response', config)
- } catch (e) {
- console.trace('get config status error', e)
- }
-}
-
-async function getModels() {
- try {
- let res = await fetch('models')
- models = await res.json()
-
- let activeModel = models['active']
- let modelOptions = models['options']
- let stableDiffusionOptions = modelOptions['stable-diffusion']
-
- stableDiffusionOptions.forEach(modelName => {
- let modelOption = document.createElement('option')
- modelOption.value = modelName
- modelOption.innerText = modelName
-
- if (modelName === activeModel['stable-diffusion']) {
- modelOption.selected = true
- }
-
- stableDiffusionModelField.appendChild(modelOption)
- })
-
- console.log('get models response', config)
- } catch (e) {
- console.trace('get models error', e)
- }
-}
-
-function checkRandomSeed() {
- if (randomSeedField.checked) {
- seedField.disabled = true
- seedField.value = "0"
- } else {
- seedField.disabled = false
- }
-}
-randomSeedField.addEventListener('input', checkRandomSeed)
-checkRandomSeed()
-
-function showInitImagePreview() {
- if (initImageSelector.files.length === 0) {
- initImagePreviewContainer.style.display = 'none'
- // inpaintingEditorContainer.style.display = 'none'
- promptStrengthContainer.style.display = 'none'
- // maskSetting.style.display = 'none'
- return
- }
-
- let reader = new FileReader()
- let file = initImageSelector.files[0]
-
- reader.addEventListener('load', function() {
- // console.log(file.name, reader.result)
- initImagePreview.src = reader.result
- initImagePreviewContainer.style.display = 'block'
- inpaintingEditorContainer.style.display = 'none'
- promptStrengthContainer.style.display = 'block'
- samplerSelectionContainer.style.display = 'none'
- // maskSetting.checked = false
- })
-
- if (file) {
- reader.readAsDataURL(file)
- }
-}
-initImageSelector.addEventListener('change', showInitImagePreview)
-showInitImagePreview()
-
-initImagePreview.addEventListener('load', function() {
- inpaintingEditorCanvasBackground.style.backgroundImage = "url('" + this.src + "')"
- // maskSetting.style.display = 'block'
- // inpaintingEditorContainer.style.display = 'block'
-})
-
-initImageClearBtn.addEventListener('click', function() {
- initImageSelector.value = null
- // maskImageSelector.value = null
-
- initImagePreview.src = ''
- // maskImagePreview.src = ''
- maskSetting.checked = false
-
- initImagePreviewContainer.style.display = 'none'
- // inpaintingEditorContainer.style.display = 'none'
- // maskImagePreviewContainer.style.display = 'none'
-
- // maskSetting.style.display = 'none'
-
- promptStrengthContainer.style.display = 'none'
- samplerSelectionContainer.style.display = 'block'
-})
-
-maskSetting.addEventListener('click', function() {
- inpaintingEditorContainer.style.display = (this.checked ? 'block' : 'none')
-})
-
-promptsFromFileBtn.addEventListener('click', function() {
- promptsFromFileSelector.click()
-})
-
-promptsFromFileSelector.addEventListener('change', function() {
- if (promptsFromFileSelector.files.length === 0) {
- return
- }
-
- let reader = new FileReader()
- let file = promptsFromFileSelector.files[0]
-
- reader.addEventListener('load', function() {
- promptField.value = reader.result
- })
-
- if (file) {
- reader.readAsText(file)
- }
-})
-
-// function showMaskImagePreview() {
-// if (maskImageSelector.files.length === 0) {
-// // maskImagePreviewContainer.style.display = 'none'
-// return
-// }
-
-// let reader = new FileReader()
-// let file = maskImageSelector.files[0]
-
-// reader.addEventListener('load', function() {
-// // maskImagePreview.src = reader.result
-// // maskImagePreviewContainer.style.display = 'block'
-// })
-
-// if (file) {
-// reader.readAsDataURL(file)
-// }
-// }
-// maskImageSelector.addEventListener('change', showMaskImagePreview)
-// showMaskImagePreview()
-
-// maskImageClearBtn.addEventListener('click', function() {
-// maskImageSelector.value = null
-// maskImagePreview.src = ''
-// // maskImagePreviewContainer.style.display = 'none'
-// })
-
-// https://stackoverflow.com/a/8212878
-function millisecondsToStr(milliseconds) {
- function numberEnding (number) {
- return (number > 1) ? 's' : ''
- }
-
- var temp = Math.floor(milliseconds / 1000)
- var hours = Math.floor((temp %= 86400) / 3600)
- var s = ''
- if (hours) {
- s += hours + ' hour' + numberEnding(hours) + ' '
- }
- var minutes = Math.floor((temp %= 3600) / 60)
- if (minutes) {
- s += minutes + ' minute' + numberEnding(minutes) + ' '
- }
- var seconds = temp % 60
- if (!hours && minutes < 4 && seconds) {
- s += seconds + ' second' + numberEnding(seconds)
- }
-
- return s
-}
-
-// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
-function getNextSibling(elem, selector) {
- // Get the next sibling element
- var sibling = elem.nextElementSibling
-
- // If there's no selector, return the first sibling
- if (!selector) return sibling
-
- // If the sibling matches our selector, use it
- // If not, jump to the next sibling and continue the loop
- while (sibling) {
- if (sibling.matches(selector)) return sibling
- sibling = sibling.nextElementSibling
- }
-}
-
-function createCollapsibles(node) {
- if (!node) {
- node = document
- }
-
- let collapsibles = node.querySelectorAll(".collapsible")
- collapsibles.forEach(function(c) {
- let handle = document.createElement('span')
- handle.className = 'collapsible-handle'
-
- if (c.className.indexOf('active') !== -1) {
- handle.innerHTML = '➖' // minus
- } else {
- handle.innerHTML = '➕' // plus
- }
- c.insertBefore(handle, c.firstChild)
-
- c.addEventListener('click', function() {
- this.classList.toggle("active")
- let content = getNextSibling(this, '.collapsible-content')
- if (content.style.display === "block") {
- content.style.display = "none"
- handle.innerHTML = '➕' // plus
- } else {
- content.style.display = "block"
- handle.innerHTML = '➖' // minus
- }
-
- if (this == advancedPanelHandle) {
- let state = (content.style.display === 'block' ? 'true' : 'false')
- localStorage.setItem(ADVANCED_PANEL_OPEN_KEY, state)
- } else if (this == modifiersPanelHandle) {
- let state = (content.style.display === 'block' ? 'true' : 'false')
- localStorage.setItem(MODIFIERS_PANEL_OPEN_KEY, state)
- }
- })
- })
-}
-createCollapsibles()
-
-function refreshTagsList() {
- editorModifierTagsList.innerHTML = ''
-
- if (activeTags.length == 0) {
- editorTagsContainer.style.display = 'none'
- return
- } else {
- editorTagsContainer.style.display = 'block'
- }
-
- activeTags.forEach((tag, index) => {
- tag.element.querySelector('.modifier-card-image-overlay').innerText = '-'
- tag.element.classList.add('modifier-card-tiny')
-
- editorModifierTagsList.appendChild(tag.element)
-
- tag.element.addEventListener('click', () => {
- let idx = activeTags.indexOf(tag)
-
- if (idx !== -1) {
- activeTags[idx].originElement.classList.remove(activeCardClass)
- activeTags[idx].originElement.querySelector('.modifier-card-image-overlay').innerText = '+'
-
- activeTags.splice(idx, 1)
- refreshTagsList()
- }
- })
- })
-
- let brk = document.createElement('br')
- brk.style.clear = 'both'
- editorModifierTagsList.appendChild(brk)
-}
-
-async function getDiskPath() {
- try {
- let diskPath = getSavedDiskPath()
-
- if (diskPath !== '') {
- diskPathField.value = diskPath
- return
- }
-
- let res = await fetch('output_dir')
- if (res.status === 200) {
- res = await res.json()
- res = res[0]
-
- document.querySelector('#diskPath').value = res
- }
- } catch (e) {
- console.trace('error fetching output dir path', e)
- }
-}
-
-function createModifierCard(name, previews) {
- const modifierCard = document.createElement('div')
- modifierCard.className = 'modifier-card'
- modifierCard.innerHTML = `
-
-
-
+
-
-
![Modifier Image]()
-
-
`
-
- const image = modifierCard.querySelector('.modifier-card-image')
- const errorText = modifierCard.querySelector('.modifier-card-error-label')
- const label = modifierCard.querySelector('.modifier-card-label')
-
- errorText.innerText = 'No Image'
-
- if (typeof previews == 'object') {
- image.src = previews[0]; // portrait
- image.setAttribute('preview-type', 'portrait')
- } else {
- image.remove()
- }
-
- const maxLabelLength = 30
- const nameWithoutBy = name.replace('by ', '')
-
- if(nameWithoutBy.length <= maxLabelLength) {
- label.querySelector('p').innerText = nameWithoutBy
- } else {
- const tooltipText = document.createElement('span')
- tooltipText.className = 'tooltip-text'
- tooltipText.innerText = name
-
- label.classList.add('tooltip')
- label.appendChild(tooltipText)
-
- label.querySelector('p').innerText = nameWithoutBy.substring(0, maxLabelLength) + '...'
- }
-
- return modifierCard
-}
-
-function changePreviewImages(val) {
- const previewImages = document.querySelectorAll('.modifier-card-image-container img')
-
- let previewArr = []
-
- modifiers.map(x => x.modifiers).forEach(x => previewArr.push(...x.map(m => m.previews)))
-
- previewArr = previewArr.map(x => {
- let obj = {}
-
- x.forEach(preview => {
- obj[preview.name] = preview.path
- })
-
- return obj
- })
-
- previewImages.forEach(previewImage => {
- const currentPreviewType = previewImage.getAttribute('preview-type')
- const relativePreviewPath = previewImage.src.split(modifierThumbnailPath + '/').pop()
-
- const previews = previewArr.find(preview => relativePreviewPath == preview[currentPreviewType])
-
- if(typeof previews == 'object') {
- let preview = null
-
- if (val == 'portrait') {
- preview = previews.portrait
- }
- else if (val == 'landscape') {
- preview = previews.landscape
- }
-
- if(preview != null) {
- previewImage.src = `${modifierThumbnailPath}/${preview}`
- previewImage.setAttribute('preview-type', val)
- }
- }
- })
-}
-
-function resizeModifierCards(val) {
- const cardSizePrefix = 'modifier-card-size_'
- const modifierCardClass = 'modifier-card'
-
- const modifierCards = document.querySelectorAll(`.${modifierCardClass}`)
- const cardSize = n => `${cardSizePrefix}${n}`
-
- modifierCards.forEach(card => {
- // remove existing size classes
- const classes = card.className.split(' ').filter(c => !c.startsWith(cardSizePrefix))
- card.className = classes.join(' ').trim()
-
- if(val != 0)
- card.classList.add(cardSize(val))
- })
-}
-
-async function loadModifiers() {
- try {
- let res = await fetch('modifiers.json?v=2')
- if (res.status === 200) {
- res = await res.json()
-
- modifiers = res; // update global variable
-
- res.forEach((modifierGroup, idx) => {
- const title = modifierGroup.category
- const modifiers = modifierGroup.modifiers
-
- const titleEl = document.createElement('h5')
- titleEl.className = 'collapsible'
- titleEl.innerText = title
-
- const modifiersEl = document.createElement('div')
- modifiersEl.classList.add('collapsible-content', 'editor-modifiers-leaf')
-
- if (idx == 0) {
- titleEl.className += ' active'
- modifiersEl.style.display = 'block'
- }
-
- modifiers.forEach(modObj => {
- const modifierName = modObj.modifier
- const modifierPreviews = modObj?.previews?.map(preview => `${modifierThumbnailPath}/${preview.path}`)
-
- const modifierCard = createModifierCard(modifierName, modifierPreviews)
-
- if(typeof modifierCard == 'object') {
- modifiersEl.appendChild(modifierCard)
-
- modifierCard.addEventListener('click', () => {
- if (activeTags.map(x => x.name).includes(modifierName)) {
- // remove modifier from active array
- activeTags = activeTags.filter(x => x.name != modifierName)
- modifierCard.classList.remove(activeCardClass)
-
- modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
- } else {
- // add modifier to active array
- activeTags.push({
- 'name': modifierName,
- 'element': modifierCard.cloneNode(true),
- 'originElement': modifierCard,
- 'previews': modifierPreviews
- })
-
- modifierCard.classList.add(activeCardClass)
-
- modifierCard.querySelector('.modifier-card-image-overlay').innerText = '-'
- }
-
- refreshTagsList()
- })
- }
- })
-
- let brk = document.createElement('br')
- brk.style.clear = 'both'
- modifiersEl.appendChild(brk)
-
- let e = document.createElement('div')
- e.appendChild(titleEl)
- e.appendChild(modifiersEl)
-
- editorModifierEntries.appendChild(e)
- })
-
- createCollapsibles(editorModifierEntries)
- }
- } catch (e) {
- console.trace('error fetching modifiers', e)
- }
-}
diff --git a/spaces/awacke1/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline/README.md b/spaces/awacke1/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline/README.md
deleted file mode 100644
index 7f938031e2ec7220c733af38eac04d0e2bf53c95..0000000000000000000000000000000000000000
--- a/spaces/awacke1/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChatGPTwithAPI
-emoji: 🚀
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: AI-ZTH-03-23/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game/README.md b/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game/README.md
deleted file mode 100644
index fd4b272bb8a8696da8c89ccd2950a9190a42f173..0000000000000000000000000000000000000000
--- a/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: 🤖🧠AI-RPG-Self-Play-RLML-Health-Battler-Game🏆🎁🎮
-emoji: 🏋️♀️💪🏥
-colorFrom: pink
-colorTo: red
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-# AI RPG Self-Play RL ML Health Battler Game Press Release
-
-## Introduction
-🎉🎮🤖 Attention all gamers and health enthusiasts! The ultimate weapon to battle health problems has arrived - the AI RPG Self-Play RL ML Health Battler Game! 🤖🎮🎉
-
-## Gamified Health Battles
-- 🏋️♀️💪🏥 Sick of boring workouts and mundane health routines? Get ready to take on health problems like never before with our gamified approach. 🎉🕹️
-
-## Advanced AI Technology
-- 🤖🧠🔥 The AI technology behind our game is so advanced, you'll think you're battling a real-life disease! Let the personalized gameplay experience adapt to your style and keep you engaged for hours on end. 💻👨🔬
-
-## Healthy Competition
-- 🏆🎁🎮 Ready for some healthy competition? Compete against friends and other players around the world, earning rewards and achievements with our self-play reinforcement learning algorithms. 🌎🏆
-
-## Availability
-- 👨💻📲 The AI RPG Self-Play RL ML Health Battler Game is now available for public open source use on all platforms, including iOS and Android devices, via the world's largest ML platform Huggingface! Download now and start fighting for your health. 📲💥
-
-## Conclusion
-- Don't let health problems get the best of you - join the fight with our AI RPG Self-Play RL ML Health Battler Game! 🎮💪🩺
-
diff --git a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/Dockerfile b/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/Dockerfile
deleted file mode 100644
index 7ae5f101e356251bb52f2b34fec3032a0a084da7..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-# Use the official Python 3.9 image
-FROM python:3.9
-
-# Set the working directory to /code
-WORKDIR /code
-
-# Copy the current directory contents into the container at /code
-COPY ./requirements.txt /code/requirements.txt
-
-# Install requirements.txt
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-# Switch to the "user" user
-USER user
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-# Uvicorn: https://www.uvicorn.org/settings/ use main:app to run main.py
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
\ No newline at end of file
diff --git a/spaces/awacke1/Docker.Jupyterlab.Integration.HF/Dockerfile b/spaces/awacke1/Docker.Jupyterlab.Integration.HF/Dockerfile
deleted file mode 100644
index f93d1a3b2738f0cea3af549894cfa10fb45a5c07..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Docker.Jupyterlab.Integration.HF/Dockerfile
+++ /dev/null
@@ -1,99 +0,0 @@
-FROM nvidia/cuda:11.3.1-base-ubuntu20.04
-
-ENV DEBIAN_FRONTEND=noninteractive \
- TZ=Europe/Paris
-
-# Remove any third-party apt sources to avoid issues with expiring keys.
-# Install some basic utilities
-RUN rm -f /etc/apt/sources.list.d/*.list && \
- apt-get update && apt-get install -y --no-install-recommends \
- curl \
- ca-certificates \
- sudo \
- git \
- git-lfs \
- zip \
- unzip \
- htop \
- bzip2 \
- libx11-6 \
- build-essential \
- libsndfile-dev \
- software-properties-common \
- && rm -rf /var/lib/apt/lists/*
-
-RUN add-apt-repository ppa:flexiondotorg/nvtop && \
- apt-get upgrade -y && \
- apt-get install -y --no-install-recommends nvtop
-
-RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
- apt-get install -y nodejs && \
- npm install -g configurable-http-proxy
-
-# Create a working directory
-WORKDIR /app
-
-# Create a non-root user and switch to it
-RUN adduser --disabled-password --gecos '' --shell /bin/bash user \
- && chown -R user:user /app
-RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user
-USER user
-
-# All users can use /home/user as their home directory
-ENV HOME=/home/user
-RUN mkdir $HOME/.cache $HOME/.config \
- && chmod -R 777 $HOME
-
-# Set up the Conda environment
-ENV CONDA_AUTO_UPDATE_CONDA=false \
- PATH=$HOME/miniconda/bin:$PATH
-RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh \
- && chmod +x ~/miniconda.sh \
- && ~/miniconda.sh -b -p ~/miniconda \
- && rm ~/miniconda.sh \
- && conda clean -ya
-
-WORKDIR $HOME/app
-
-#######################################
-# Start root user section
-#######################################
-
-USER root
-
-# User Debian packages
-## Security warning : Potential user code executed as root (build time)
-RUN --mount=target=/root/packages.txt,source=packages.txt \
- apt-get update && \
- xargs -r -a /root/packages.txt apt-get install -y --no-install-recommends \
- && rm -rf /var/lib/apt/lists/*
-
-RUN --mount=target=/root/on_startup.sh,source=on_startup.sh,readwrite \
- bash /root/on_startup.sh
-
-#######################################
-# End root user section
-#######################################
-
-USER user
-
-# Python packages
-RUN --mount=target=requirements.txt,source=requirements.txt \
- pip install --no-cache-dir --upgrade -r requirements.txt
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-RUN chmod +x start_server.sh
-
-COPY --chown=user login.html /home/user/miniconda/lib/python3.9/site-packages/jupyter_server/templates/login.html
-
-ENV PYTHONUNBUFFERED=1 \
- GRADIO_ALLOW_FLAGGING=never \
- GRADIO_NUM_PORTS=1 \
- GRADIO_SERVER_NAME=0.0.0.0 \
- GRADIO_THEME=huggingface \
- SYSTEM=spaces \
- SHELL=/bin/bash
-
-CMD ["./start_server.sh"]
diff --git a/spaces/awacke1/Team.Click.Battle.Multiplayer/backup.app.py b/spaces/awacke1/Team.Click.Battle.Multiplayer/backup.app.py
deleted file mode 100644
index edce60673d1d2ea33e4a11cee480d9eb6e095bc4..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Team.Click.Battle.Multiplayer/backup.app.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Team.Click.Battle.Multiplayer
-
-import streamlit as st
-import random
-import time
-import plotly.graph_objects as go
-
-teams = [
- ('Team 1', '😎', 'Cool Squad', 'New York City'),
- ('Team 2', '🚀', 'Rocketeers', 'Los Angeles'),
- ('Team 3', '🤖', 'Robo Gang', 'San Francisco'),
- ('Team 4', '🌟', 'Super Stars', 'Chicago'),
- ('Team 5', '🐉', 'Dragons', 'Houston')
-]
-
-def run_scenario(duration=100, click_card_limit=None):
- start_time = time.time()
- votes = {team[0]: [0, 0] for team in teams} # Initialize upvotes and downvotes
- click_cards = 0
- chat = []
-
- while time.time() - start_time < duration:
- if click_card_limit is None or click_cards < click_card_limit:
- click_cards += 1
-
- team = random.choice(teams)
- vote_type = random.choice(['upvote', 'downvote'])
- clicks = 1 + 3 * (click_cards > 0)
- click_cards -= clicks > 1
-
- if vote_type == 'upvote':
- votes[team[0]][0] += clicks
- else:
- votes[team[0]][1] += clicks
- chat.append((team, vote_type, clicks))
- time.sleep(random.uniform(0, 1)) # Random sleep between 0 and 1 seconds
-
- return votes, chat
-
-def save_votes_to_file(votes, filename='upvotes.txt'):
- with open(filename, 'w') as f:
- for team, vote_counts in votes.items():
- f.write(f"{team}: {vote_counts[0]} upvotes, {vote_counts[1]} downvotes\n")
-
-def create_sankey(votes):
- labels = []
- source = []
- target = []
- value = []
-
- for i, team in enumerate(teams):
- labels.append(f"{team[1]} {team[2]}")
- source += [i, i]
- target += [len(teams), len(teams) + 1]
- value += [votes[team[0]][0], votes[team[0]][1]]
-
- labels += ['Upvotes', 'Downvotes']
-
- fig = go.Figure(data=[go.Sankey(
- node=dict(pad=15, thickness=20, line=dict(color='black', width=0.5), label=labels),
- link=dict(source=source, target=target, value=value))])
-
- fig.update_layout(title_text='Location Simulator by Nickname', title_font=dict(size=24, color='blue'))
-
- return fig
-
-st.title("Team Upvotes and Downvotes Emoji Game")
-
-duration = st.slider("Duration (seconds)", min_value=0, max_value=100, value=10, step=1)
-click_card_limit = st.slider("Click Card Limit", min_value=0, max_value=100, value=10, step=1)
-
-st.write(f"Running scenario for {duration} seconds with {click_card_limit} click cards...")
-votes, chat = run_scenario(duration, click_card_limit)
-
-save_votes_to_file(votes)
-
-st.header("Results")
-for team, vote_counts in votes.items():
- st.write(f"{team}: {vote_counts[0]} upvotes, {vote_counts[1]} downvotes")
-
-st.header("Chat")
-for message in chat:
- team, vote_type, clicks = message
- st.write(f"{team[1]} {team[2]}: {clicks} {vote_type}s")
-
-st.header("Sankey Graph")
-fig = create_sankey(votes)
-st.plotly_chart(fig)
\ No newline at end of file
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/vencoder/whisper/__init__.py b/spaces/azusarang/so-vits-svc-models-ba_P/vencoder/whisper/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/badayvedat/LLaVA/README.md b/spaces/badayvedat/LLaVA/README.md
deleted file mode 100644
index 1c9bfa43a7f646228ab2ea6a6107df231bf15d55..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: LLaVA
-emoji: 🔥
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.36.1
-app_port: 7860
----
\ No newline at end of file
diff --git a/spaces/benzel34/fun/README.md b/spaces/benzel34/fun/README.md
deleted file mode 100644
index e15bcd445a505a9d06f026b5b3402d8e2ebaf9e2..0000000000000000000000000000000000000000
--- a/spaces/benzel34/fun/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: fun
-emoji: 🎉
-colorFrom: indigo
-colorTo: indigo
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/bguberfain/Detic/detic/modeling/meta_arch/custom_rcnn.py b/spaces/bguberfain/Detic/detic/modeling/meta_arch/custom_rcnn.py
deleted file mode 100644
index 9a5ac721d42e40a8b4f28508b10a932cef827fcf..0000000000000000000000000000000000000000
--- a/spaces/bguberfain/Detic/detic/modeling/meta_arch/custom_rcnn.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import logging
-import numpy as np
-from typing import Dict, List, Optional, Tuple
-import torch
-from torch import nn
-import json
-from detectron2.utils.events import get_event_storage
-from detectron2.config import configurable
-from detectron2.structures import ImageList, Instances, Boxes
-import detectron2.utils.comm as comm
-
-from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
-from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
-from detectron2.modeling.postprocessing import detector_postprocess
-from detectron2.utils.visualizer import Visualizer, _create_text_labels
-from detectron2.data.detection_utils import convert_image_to_rgb
-
-from torch.cuda.amp import autocast
-from ..text.text_encoder import build_text_encoder
-from ..utils import load_class_freq, get_fed_loss_inds
-
-@META_ARCH_REGISTRY.register()
-class CustomRCNN(GeneralizedRCNN):
- '''
- Add image labels
- '''
- @configurable
- def __init__(
- self,
- with_image_labels = False,
- dataset_loss_weight = [],
- fp16 = False,
- sync_caption_batch = False,
- roi_head_name = '',
- cap_batch_ratio = 4,
- with_caption = False,
- dynamic_classifier = False,
- **kwargs):
- """
- """
- self.with_image_labels = with_image_labels
- self.dataset_loss_weight = dataset_loss_weight
- self.fp16 = fp16
- self.with_caption = with_caption
- self.sync_caption_batch = sync_caption_batch
- self.roi_head_name = roi_head_name
- self.cap_batch_ratio = cap_batch_ratio
- self.dynamic_classifier = dynamic_classifier
- self.return_proposal = False
- if self.dynamic_classifier:
- self.freq_weight = kwargs.pop('freq_weight')
- self.num_classes = kwargs.pop('num_classes')
- self.num_sample_cats = kwargs.pop('num_sample_cats')
- super().__init__(**kwargs)
- assert self.proposal_generator is not None
- if self.with_caption:
- assert not self.dynamic_classifier
- self.text_encoder = build_text_encoder(pretrain=True)
- for v in self.text_encoder.parameters():
- v.requires_grad = False
-
-
- @classmethod
- def from_config(cls, cfg):
- ret = super().from_config(cfg)
- ret.update({
- 'with_image_labels': cfg.WITH_IMAGE_LABELS,
- 'dataset_loss_weight': cfg.MODEL.DATASET_LOSS_WEIGHT,
- 'fp16': cfg.FP16,
- 'with_caption': cfg.MODEL.WITH_CAPTION,
- 'sync_caption_batch': cfg.MODEL.SYNC_CAPTION_BATCH,
- 'dynamic_classifier': cfg.MODEL.DYNAMIC_CLASSIFIER,
- 'roi_head_name': cfg.MODEL.ROI_HEADS.NAME,
- 'cap_batch_ratio': cfg.MODEL.CAP_BATCH_RATIO,
- })
- if ret['dynamic_classifier']:
- ret['freq_weight'] = load_class_freq(
- cfg.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH,
- cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT)
- ret['num_classes'] = cfg.MODEL.ROI_HEADS.NUM_CLASSES
- ret['num_sample_cats'] = cfg.MODEL.NUM_SAMPLE_CATS
- return ret
-
-
- def inference(
- self,
- batched_inputs: Tuple[Dict[str, torch.Tensor]],
- detected_instances: Optional[List[Instances]] = None,
- do_postprocess: bool = True,
- ):
- assert not self.training
- assert detected_instances is None
-
- images = self.preprocess_image(batched_inputs)
- features = self.backbone(images.tensor)
- proposals, _ = self.proposal_generator(images, features, None)
- results, _ = self.roi_heads(images, features, proposals)
- if do_postprocess:
- assert not torch.jit.is_scripting(), \
- "Scripting is not supported for postprocess."
- return CustomRCNN._postprocess(
- results, batched_inputs, images.image_sizes)
- else:
- return results
-
-
- def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
- """
- Add ann_type
- Ignore proposal loss when training with image labels
- """
- if not self.training:
- return self.inference(batched_inputs)
-
- images = self.preprocess_image(batched_inputs)
-
- ann_type = 'box'
- gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
- if self.with_image_labels:
- for inst, x in zip(gt_instances, batched_inputs):
- inst._ann_type = x['ann_type']
- inst._pos_category_ids = x['pos_category_ids']
- ann_types = [x['ann_type'] for x in batched_inputs]
- assert len(set(ann_types)) == 1
- ann_type = ann_types[0]
- if ann_type in ['prop', 'proptag']:
- for t in gt_instances:
- t.gt_classes *= 0
-
- if self.fp16: # TODO (zhouxy): improve
- with autocast():
- features = self.backbone(images.tensor.half())
- features = {k: v.float() for k, v in features.items()}
- else:
- features = self.backbone(images.tensor)
-
- cls_features, cls_inds, caption_features = None, None, None
-
- if self.with_caption and 'caption' in ann_type:
- inds = [torch.randint(len(x['captions']), (1,))[0].item() \
- for x in batched_inputs]
- caps = [x['captions'][ind] for ind, x in zip(inds, batched_inputs)]
- caption_features = self.text_encoder(caps).float()
- if self.sync_caption_batch:
- caption_features = self._sync_caption_features(
- caption_features, ann_type, len(batched_inputs))
-
- if self.dynamic_classifier and ann_type != 'caption':
- cls_inds = self._sample_cls_inds(gt_instances, ann_type) # inds, inv_inds
- ind_with_bg = cls_inds[0].tolist() + [-1]
- cls_features = self.roi_heads.box_predictor[
- 0].cls_score.zs_weight[:, ind_with_bg].permute(1, 0).contiguous()
-
- classifier_info = cls_features, cls_inds, caption_features
- proposals, proposal_losses = self.proposal_generator(
- images, features, gt_instances)
-
- if self.roi_head_name in ['StandardROIHeads', 'CascadeROIHeads']:
- proposals, detector_losses = self.roi_heads(
- images, features, proposals, gt_instances)
- else:
- proposals, detector_losses = self.roi_heads(
- images, features, proposals, gt_instances,
- ann_type=ann_type, classifier_info=classifier_info)
-
- if self.vis_period > 0:
- storage = get_event_storage()
- if storage.iter % self.vis_period == 0:
- self.visualize_training(batched_inputs, proposals)
-
- losses = {}
- losses.update(detector_losses)
- if self.with_image_labels:
- if ann_type in ['box', 'prop', 'proptag']:
- losses.update(proposal_losses)
- else: # ignore proposal loss for non-bbox data
- losses.update({k: v * 0 for k, v in proposal_losses.items()})
- else:
- losses.update(proposal_losses)
- if len(self.dataset_loss_weight) > 0:
- dataset_sources = [x['dataset_source'] for x in batched_inputs]
- assert len(set(dataset_sources)) == 1
- dataset_source = dataset_sources[0]
- for k in losses:
- losses[k] *= self.dataset_loss_weight[dataset_source]
-
- if self.return_proposal:
- return proposals, losses
- else:
- return losses
-
-
- def _sync_caption_features(self, caption_features, ann_type, BS):
- has_caption_feature = (caption_features is not None)
- BS = (BS * self.cap_batch_ratio) if (ann_type == 'box') else BS
- rank = torch.full(
- (BS, 1), comm.get_rank(), dtype=torch.float32,
- device=self.device)
- if not has_caption_feature:
- caption_features = rank.new_zeros((BS, 512))
- caption_features = torch.cat([caption_features, rank], dim=1)
- global_caption_features = comm.all_gather(caption_features)
- caption_features = torch.cat(
- [x.to(self.device) for x in global_caption_features], dim=0) \
- if has_caption_feature else None # (NB) x (D + 1)
- return caption_features
-
-
- def _sample_cls_inds(self, gt_instances, ann_type='box'):
- if ann_type == 'box':
- gt_classes = torch.cat(
- [x.gt_classes for x in gt_instances])
- C = len(self.freq_weight)
- freq_weight = self.freq_weight
- else:
- gt_classes = torch.cat(
- [torch.tensor(
- x._pos_category_ids,
- dtype=torch.long, device=x.gt_classes.device) \
- for x in gt_instances])
- C = self.num_classes
- freq_weight = None
- assert gt_classes.max() < C, '{} {}'.format(gt_classes.max(), C)
- inds = get_fed_loss_inds(
- gt_classes, self.num_sample_cats, C,
- weight=freq_weight)
- cls_id_map = gt_classes.new_full(
- (self.num_classes + 1,), len(inds))
- cls_id_map[inds] = torch.arange(len(inds), device=cls_id_map.device)
- return inds, cls_id_map
\ No newline at end of file
diff --git a/spaces/bigcode/bigcode-editor/static/style.css b/spaces/bigcode/bigcode-editor/static/style.css
deleted file mode 100644
index cb6f1848674e8ce907e49f74864fc3fc023a96aa..0000000000000000000000000000000000000000
--- a/spaces/bigcode/bigcode-editor/static/style.css
+++ /dev/null
@@ -1,39 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-button {
- font-size: 15px;
-}
-
-.softspan {
- color: rgb(127, 134, 148);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 800px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/bioriAsaeru/text-to-voice/Descargar tonos de llamada mensajes notificaciones gratis para su android o iphone cmo personalizar tu mvil.md b/spaces/bioriAsaeru/text-to-voice/Descargar tonos de llamada mensajes notificaciones gratis para su android o iphone cmo personalizar tu mvil.md
deleted file mode 100644
index 5278a758c07ae1e7ce53f4c28052b3a9aeb6d3a2..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Descargar tonos de llamada mensajes notificaciones gratis para su android o iphone cmo personalizar tu mvil.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
Descargador gratuito de tonos de llamada de música mp3. Itunemachine proporciona varios tipos de tonos de llamada gratuitos de alta calidad.
Music Ringtones Mobile, los mejores tonos para celular gratis. Descargar descargas gratuitas de tonos de llamada mp3 para teléfonos móviles, incluidos iPhone y Android
Últimos y tonos de llamada populares, descargar tonos de llamada, tonos de mensaje , Tonos de alarma etc... Tonos de llamada gratis para todo tipo de teléfonos
-
Descargar tonos de llamada, mensajes, notificaciones gratis para su android o iphone
Download File - https://urloso.com/2uyRHd
-
Descargue tonos de llamada gratis para cualquier teléfono móvil, incluidos tonos de llamada MP3 para Android o tonos de llamada m4r para Iphone Nuestros Ringtones son fáciles de descargar en solo 2 pasos.
-
Una colección de efectos de sonido de tono de llamada de teléfono. Descargar tonos de llamada gratis para el teléfono móvil. El tono de llamada de instalación está disponible en formato mp3, m4a para teléfonos Android y iPhone. Más de 50,000 tonos de iPhone, tonos de Samsung, tonos de SMS gratis
-
Hay una gran cantidad de páginas en Internet para descargar tonos de llamada para móviles, de modo que puedas personalizar el sonido de las llamadas, mensajes o notificaciones. La mayoría tienen tonos de mala calidad, incompletos o de pago, así que he recopilado las mejores en este artículo, desde donde se puede descargar gratis. A ver si te gustan!
-
La personalización de los móviles siempre ha estado presente, y gracias a las actualizaciones de sistema se han mantenido en el tiempo adaptando los móviles a las nuevas tendencias.Aplicaciones gratis
- Apps para ganar dinero
- Apps de WiFi gratis
- Apps para descargar música
- Apps para ver la TV
Anteriormente, los
tonos polifónicos y monofónicos eran los más populares, pero con los a los
avances tecnológicos esto ha cambiado.Tanto así, que hoy en día es posible personalizar cualquier dispositivo móvil con cientos de formas y estilos, ya sea con los mejores launchers, temas especiales, fondos para pantallas OLED, widgets y, por supuesto, los tonos llamadas no se quedan atrás. En este artículo conocerás las
mejores apps de tonos de llamada gratis para Android.
-
-
Seleccionar un tono de llamada es muy importante, ya que esto hablará mucho de tu persona y le otorgará personalización a tus dispositivo Android. Y aunque es muy común ver a algunos usuarios con smartphone utilizando tonos muy clásicos, en este top encontrarás apps de tonos de llamada gratis para Android sumamente originales.ZedgeZedge: millones de tonos de llamadas y notificaciones gratis
-
Una de las apps de tonos de llamada gratis para Android es Zedge. La misma cuenta con una base de datos impresionante de tonos variados, los cuales se encuentran organizados por categorías.Podrás escoger cualquiera de los tonos, ya sea para llamadas, mensajes o notificaciones de aplicaciones como Telegram o WhatsApp. Lo más increíble de Zedge es que a pesar de sus millones de tonos también tendrás disponible una gran variedad de wallpapers de la mejor calidad.Google Play | ZedgeTonos De Llamada Gratis 2021Tonos De Llamada Gratis 2021: más de 10.000 tonos gratis
-
La propia aplicación habla mucho por sí sola, se trata de una app para personalizar el tono de tu dispositivo móvil, la misma cuenta con una gran variedad de categorías, cada una de ellas repleta de tonos originales.Además, tendrás la facilidad de reproducir el tono al momento y no solo eso, también podrás ajustarlo directamente desde la pantalla y colocarlo como tono para llamadas, contactos, notificaciones o alarmas. Lo mejor de todo es que gracias a su buscador inteligente con simples palabras podrás encontrar tus canciones favoritas.Google Play | onos De Llamada Gratis 2021AudikoAudiko: una app con una gran cantidad de tonos y editor
-
Otras de las mejores apps de tonos de llamada gratis para Android es Audiko, esta aplicación es muy similar a las anteriores, cuenta con una base de datos gigantesca donde es posible encontrar cualquier tono, ya sea para SMS, llamadas, alarmas, aplicaciones de mensajería, notificaciones, entre otros.Sin embargo, lo que hace superior a Audiko en comparación a las otras, es que la misma viene con un editor integrado de música, y que a pesar de ser algo sencillo es perfecto para editar cualquier música que tengas en tu dispositivo. Esto te dará la oportunidad de crear tu propio tono de llamada personalizado.Google Play | AudikoTonos gratis appTonos gratis app: 10.000 tonos de llamadas únicos y gratuitos
-
Una de las aplicaciones que debes tener en cuenta para la personalización del tono de llamada en tu móvil es Tonos gratis app. La misma cuenta con una colección de los mejores tonos de los últimos tiempos, también presenta una gran variedad de categorías, y presenta un apartado con los tonos más destacados.Gracias a su interfaz fácil y minimalista, es muy sencillo acceder y manipular esta aplicación. Además, no es necesario registrarse, solo deberás instalar la aplicación, ejecutarla y ya tendrás una gran variedad de tonos disponibles. Por supuesto, podrás usar estos tonos para lo que desees, ya sea WhatsApp, notificaciones, alarmas, SMS, entre otros.Google Play | Tonos gratis appSonidos de Notificaciones - Ringtones y TonosLa aplicación Sonidos de Notificaciones es ideal para aquellos usuarios curiosos, ya que cuenta con más de 1000 tipos de tonos, y cada uno de los tonos es particularmente diferente a las demás aplicaciones.La plataforma viene con más de 18 categorías distintas, algunos de las categorías presentes son: divertidos, bebés, animales, molestos, memes ,disparos, alarmas, dibujos, originales, y lo mejor de todo, son completamente gratis.Google Play | Sonidos de Notificaciones - Ringtones y TonosTonos PopularesTonos Populares: tonos y sonidos populares
-
Aplicaciones de tonos de llamada gratis para Android existen centenares en la Google Play, sin embargo, la app Tonos Populares es una de las más resaltes. La misma cuenta con una interfaz minimalista que la hace destacar frente al resto.Además, podrás usar una gran variedad de tonos y adaptarlos según tus gustos y preferencias, ya sea colocar un tono diferente para cada contacto, tonos para las notificaciones de las aplicaciones de terceros e incluso usar distintos tonos para las alarmas.Google Play | Tonos PopularesTonos para Celular GratisOtra de las apps a tener en cuenta si deseas personalizar el tono de llamada del móvil es Tonos para Celular Gratis. Esta aplicación se encuentra valorada en la Google Play con más de 4.5 estrellas convirtiéndola en una de las mejores opciones para los usuarios.Su interfaz presenta la tendencia minimalista de otras apps, haciéndola bastante fácil al momento de usarla, cuanta con diversas categorías y cada una de ellas presenta tonos de la mejor calidad.Lo mejor de todo es que no solo es capaz personalizar el tono de llamada de WhatsApp, también tendrás a tu disposición una serie de wallpapers para la pantalla de tu móvil.Google Play | Tonos para Celular GratisRingo Ringtones tonos de llamada y notificacionesRingo Ringtones: tonos y melodías favoritas ebn Android
-
Por último en esta lista encontrarás a Ringo Ringtones tonos de llamada y notificaciones, una aplicación especializada en melodías y música en tendencia. Además, cuenta con una gran variedad de categorías como Latino, Danza, Hip Hop, Amor, Películas, Pop, R&B, RAP, Soul, Reggae, Religioso, Rock, Country, Clásica, Infantil, entre otros.Y no solo eso, también encontrarás tonos especiales y graciosos, de comedia, refranes, efectos especiales y tonos de fábrica de móviles como iPhone para descargar. Su interfaz es muy fácil de usar, no es necesario registrarse y contiene cero anuncios publicitarios.Google Play | Ringo Ringtones tonos de llamada y notificacionesSi te ha gustado este artículo, no dudes en echarle un vistazo a cómo cambiar el sonido de notificación por defecto en tu móvil Android.
-
La mayoría de los usuarios de iPhone están hartos del tono de llamada predeterminado para iPhone. Especialmente en lugares públicos, al oírlo, casi no se puede saber de quién está sonando el iPhone. Además de que suele ser muy molesto. Para evitar estos momentos incómodos, aquí te presentamos cinco sitios web donde puedes descargar tonos gratis para iPhone. Después, puedes configurarlos como tonos de llamada únicos para tu iPhone.
-
Este sitio web contiene una gran cantidad de recursos, incluyendo fondos de pantalla, tonos de llamada y temas. Bajo la pestaña de Ringtones, encontrarás muchas canciones geniales que puedes configurar como tonos para tu iPhone. Y puedes buscar fácilmente la música en una variedad de categorías, tales como clásico, country, rock y así sucesivamente. Seguro que encontrarás algunos sonidos que te gustan aquí. En cuanto a obtenerlas, tienes tres opciones: descargarlas en el equipo, enviarlas al correo o escanear un código QR. En general, es un buen sitio web para tomar tonos de llamada gratis para iPhone 6s.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bla/tranny/App/Users/UserRoutes.py b/spaces/bla/tranny/App/Users/UserRoutes.py
deleted file mode 100644
index ec1c923bf1e6ab6b3792538e51ce8adab543c350..0000000000000000000000000000000000000000
--- a/spaces/bla/tranny/App/Users/UserRoutes.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from fastapi import APIRouter, Request, Depends, HTTPException
-from .Schemas import BaseRequest, UserSchema
-from pydantic import BaseModel
-from .Model import User
-from sqlalchemy import and_
-from fastapi_jwt_auth import AuthJWT
-
-
-class Settings(BaseModel):
- authjwt_secret_key: str = "secret"
- authjwt_access_token_expires: bool = False
-
-
-user_router = APIRouter(tags=["User"])
-
-
-@AuthJWT.load_config
-def get_config():
- return Settings()
-
-
-@user_router.post("/user/register")
-async def register_user(user: BaseRequest):
- data = await User.objects.filter(email=user.email).first()
- if data != None:
- return {"code": 400, "message": "user exists", "payload": None}
- else:
- user.hash_password()
- sample = await User.objects.create(**user.dict())
- return {"code": 200, "message": "success", "payload": None}
-
-
-async def get_token_owner(Authorize: AuthJWT = Depends()):
- Authorize.jwt_required()
- current_user = Authorize.get_jwt_subject()
- user = await User.objects.filter(id=int(current_user)).first()
- if not user:
- raise HTTPException(status_code=401, detail="Invalid Credentials")
-
- return UserSchema.from_orm(user)
-
-
-@user_router.post("/user/login")
-async def register_user(user: BaseRequest, Authorize: AuthJWT = Depends()):
- db_user = await User.objects.filter(email=user.email).first()
-
- if not db_user:
- raise HTTPException(status_code=401, detail="Invalid Credentials")
- if not db_user.verify_password(user.password):
- raise HTTPException(status_code=401, detail="Invalid Credentials")
- user = UserSchema.from_orm(db_user)
- access_token = Authorize.create_access_token(subject=user.id)
-
- return {
- "code": 200,
- "message": "success",
- "payload": db_user.__dict__,
- "access_token": access_token,
- }
diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PpmImagePlugin.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PpmImagePlugin.py
deleted file mode 100644
index 2cb1e56365dc369d6719717f0f6775c8c9e2fdd4..0000000000000000000000000000000000000000
--- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/PpmImagePlugin.py
+++ /dev/null
@@ -1,347 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# PPM support for PIL
-#
-# History:
-# 96-03-24 fl Created
-# 98-03-06 fl Write RGBA images (as RGB, that is)
-#
-# Copyright (c) Secret Labs AB 1997-98.
-# Copyright (c) Fredrik Lundh 1996.
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-from . import Image, ImageFile
-from ._binary import i16be as i16
-from ._binary import o8
-from ._binary import o32le as o32
-
-#
-# --------------------------------------------------------------------
-
-b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d"
-
-MODES = {
- # standard
- b"P1": "1",
- b"P2": "L",
- b"P3": "RGB",
- b"P4": "1",
- b"P5": "L",
- b"P6": "RGB",
- # extensions
- b"P0CMYK": "CMYK",
- # PIL extensions (for test purposes only)
- b"PyP": "P",
- b"PyRGBA": "RGBA",
- b"PyCMYK": "CMYK",
-}
-
-
-def _accept(prefix):
- return prefix[0:1] == b"P" and prefix[1] in b"0123456y"
-
-
-##
-# Image plugin for PBM, PGM, and PPM images.
-
-
-class PpmImageFile(ImageFile.ImageFile):
- format = "PPM"
- format_description = "Pbmplus image"
-
- def _read_magic(self):
- magic = b""
- # read until whitespace or longest available magic number
- for _ in range(6):
- c = self.fp.read(1)
- if not c or c in b_whitespace:
- break
- magic += c
- return magic
-
- def _read_token(self):
- token = b""
- while len(token) <= 10: # read until next whitespace or limit of 10 characters
- c = self.fp.read(1)
- if not c:
- break
- elif c in b_whitespace: # token ended
- if not token:
- # skip whitespace at start
- continue
- break
- elif c == b"#":
- # ignores rest of the line; stops at CR, LF or EOF
- while self.fp.read(1) not in b"\r\n":
- pass
- continue
- token += c
- if not token:
- # Token was not even 1 byte
- msg = "Reached EOF while reading header"
- raise ValueError(msg)
- elif len(token) > 10:
- msg = f"Token too long in file header: {token.decode()}"
- raise ValueError(msg)
- return token
-
- def _open(self):
- magic_number = self._read_magic()
- try:
- mode = MODES[magic_number]
- except KeyError:
- msg = "not a PPM file"
- raise SyntaxError(msg)
-
- if magic_number in (b"P1", b"P4"):
- self.custom_mimetype = "image/x-portable-bitmap"
- elif magic_number in (b"P2", b"P5"):
- self.custom_mimetype = "image/x-portable-graymap"
- elif magic_number in (b"P3", b"P6"):
- self.custom_mimetype = "image/x-portable-pixmap"
-
- maxval = None
- decoder_name = "raw"
- if magic_number in (b"P1", b"P2", b"P3"):
- decoder_name = "ppm_plain"
- for ix in range(3):
- token = int(self._read_token())
- if ix == 0: # token is the x size
- xsize = token
- elif ix == 1: # token is the y size
- ysize = token
- if mode == "1":
- self.mode = "1"
- rawmode = "1;I"
- break
- else:
- self.mode = rawmode = mode
- elif ix == 2: # token is maxval
- maxval = token
- if not 0 < maxval < 65536:
- msg = "maxval must be greater than 0 and less than 65536"
- raise ValueError(msg)
- if maxval > 255 and mode == "L":
- self.mode = "I"
-
- if decoder_name != "ppm_plain":
- # If maxval matches a bit depth, use the raw decoder directly
- if maxval == 65535 and mode == "L":
- rawmode = "I;16B"
- elif maxval != 255:
- decoder_name = "ppm"
-
- args = (rawmode, 0, 1) if decoder_name == "raw" else (rawmode, maxval)
- self._size = xsize, ysize
- self.tile = [(decoder_name, (0, 0, xsize, ysize), self.fp.tell(), args)]
-
-
-#
-# --------------------------------------------------------------------
-
-
-class PpmPlainDecoder(ImageFile.PyDecoder):
- _pulls_fd = True
-
- def _read_block(self):
- return self.fd.read(ImageFile.SAFEBLOCK)
-
- def _find_comment_end(self, block, start=0):
- a = block.find(b"\n", start)
- b = block.find(b"\r", start)
- return min(a, b) if a * b > 0 else max(a, b) # lowest nonnegative index (or -1)
-
- def _ignore_comments(self, block):
- if self._comment_spans:
- # Finish current comment
- while block:
- comment_end = self._find_comment_end(block)
- if comment_end != -1:
- # Comment ends in this block
- # Delete tail of comment
- block = block[comment_end + 1 :]
- break
- else:
- # Comment spans whole block
- # So read the next block, looking for the end
- block = self._read_block()
-
- # Search for any further comments
- self._comment_spans = False
- while True:
- comment_start = block.find(b"#")
- if comment_start == -1:
- # No comment found
- break
- comment_end = self._find_comment_end(block, comment_start)
- if comment_end != -1:
- # Comment ends in this block
- # Delete comment
- block = block[:comment_start] + block[comment_end + 1 :]
- else:
- # Comment continues to next block(s)
- block = block[:comment_start]
- self._comment_spans = True
- break
- return block
-
- def _decode_bitonal(self):
- """
- This is a separate method because in the plain PBM format, all data tokens are
- exactly one byte, so the inter-token whitespace is optional.
- """
- data = bytearray()
- total_bytes = self.state.xsize * self.state.ysize
-
- while len(data) != total_bytes:
- block = self._read_block() # read next block
- if not block:
- # eof
- break
-
- block = self._ignore_comments(block)
-
- tokens = b"".join(block.split())
- for token in tokens:
- if token not in (48, 49):
- msg = b"Invalid token for this mode: %s" % bytes([token])
- raise ValueError(msg)
- data = (data + tokens)[:total_bytes]
- invert = bytes.maketrans(b"01", b"\xFF\x00")
- return data.translate(invert)
-
- def _decode_blocks(self, maxval):
- data = bytearray()
- max_len = 10
- out_byte_count = 4 if self.mode == "I" else 1
- out_max = 65535 if self.mode == "I" else 255
- bands = Image.getmodebands(self.mode)
- total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count
-
- half_token = False
- while len(data) != total_bytes:
- block = self._read_block() # read next block
- if not block:
- if half_token:
- block = bytearray(b" ") # flush half_token
- else:
- # eof
- break
-
- block = self._ignore_comments(block)
-
- if half_token:
- block = half_token + block # stitch half_token to new block
- half_token = False
-
- tokens = block.split()
-
- if block and not block[-1:].isspace(): # block might split token
- half_token = tokens.pop() # save half token for later
- if len(half_token) > max_len: # prevent buildup of half_token
- msg = (
- b"Token too long found in data: %s" % half_token[: max_len + 1]
- )
- raise ValueError(msg)
-
- for token in tokens:
- if len(token) > max_len:
- msg = b"Token too long found in data: %s" % token[: max_len + 1]
- raise ValueError(msg)
- value = int(token)
- if value > maxval:
- msg = f"Channel value too large for this mode: {value}"
- raise ValueError(msg)
- value = round(value / maxval * out_max)
- data += o32(value) if self.mode == "I" else o8(value)
- if len(data) == total_bytes: # finished!
- break
- return data
-
- def decode(self, buffer):
- self._comment_spans = False
- if self.mode == "1":
- data = self._decode_bitonal()
- rawmode = "1;8"
- else:
- maxval = self.args[-1]
- data = self._decode_blocks(maxval)
- rawmode = "I;32" if self.mode == "I" else self.mode
- self.set_as_raw(bytes(data), rawmode)
- return -1, 0
-
-
-class PpmDecoder(ImageFile.PyDecoder):
- _pulls_fd = True
-
- def decode(self, buffer):
- data = bytearray()
- maxval = self.args[-1]
- in_byte_count = 1 if maxval < 256 else 2
- out_byte_count = 4 if self.mode == "I" else 1
- out_max = 65535 if self.mode == "I" else 255
- bands = Image.getmodebands(self.mode)
- while len(data) < self.state.xsize * self.state.ysize * bands * out_byte_count:
- pixels = self.fd.read(in_byte_count * bands)
- if len(pixels) < in_byte_count * bands:
- # eof
- break
- for b in range(bands):
- value = (
- pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count)
- )
- value = min(out_max, round(value / maxval * out_max))
- data += o32(value) if self.mode == "I" else o8(value)
- rawmode = "I;32" if self.mode == "I" else self.mode
- self.set_as_raw(bytes(data), rawmode)
- return -1, 0
-
-
-#
-# --------------------------------------------------------------------
-
-
-def _save(im, fp, filename):
- if im.mode == "1":
- rawmode, head = "1;I", b"P4"
- elif im.mode == "L":
- rawmode, head = "L", b"P5"
- elif im.mode == "I":
- rawmode, head = "I;16B", b"P5"
- elif im.mode in ("RGB", "RGBA"):
- rawmode, head = "RGB", b"P6"
- else:
- msg = f"cannot write mode {im.mode} as PPM"
- raise OSError(msg)
- fp.write(head + b"\n%d %d\n" % im.size)
- if head == b"P6":
- fp.write(b"255\n")
- elif head == b"P5":
- if rawmode == "L":
- fp.write(b"255\n")
- else:
- fp.write(b"65535\n")
- ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
-
- # ALTERNATIVE: save via builtin debug function
- # im._dump(filename)
-
-
-#
-# --------------------------------------------------------------------
-
-
-Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
-Image.register_save(PpmImageFile.format, _save)
-
-Image.register_decoder("ppm", PpmDecoder)
-Image.register_decoder("ppm_plain", PpmPlainDecoder)
-
-Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"])
-
-Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/__main__.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/__main__.py
deleted file mode 100644
index a05323f93b6850c2f86aedb3b1a5dee16358027f..0000000000000000000000000000000000000000
--- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/__main__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .features import pilinfo
-
-pilinfo()
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/test_time_augmentation.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/test_time_augmentation.py
deleted file mode 100644
index ec2022ed16727f538993d2c7db60a60a1183b90d..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/test_time_augmentation.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import numpy as np
-import torch
-from fvcore.transforms import HFlipTransform, TransformList
-from torch.nn import functional as F
-
-from detectron2.data.transforms import RandomRotation, RotationTransform, apply_transform_gens
-from detectron2.modeling.postprocessing import detector_postprocess
-from detectron2.modeling.test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA
-
-from ..converters import HFlipConverter
-
-
-class DensePoseDatasetMapperTTA(DatasetMapperTTA):
- def __init__(self, cfg):
- super().__init__(cfg=cfg)
- self.angles = cfg.TEST.AUG.ROTATION_ANGLES
-
- def __call__(self, dataset_dict):
- ret = super().__call__(dataset_dict=dataset_dict)
- numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
- for angle in self.angles:
- rotate = RandomRotation(angle=angle, expand=True)
- new_numpy_image, tfms = apply_transform_gens([rotate], np.copy(numpy_image))
- torch_image = torch.from_numpy(np.ascontiguousarray(new_numpy_image.transpose(2, 0, 1)))
- dic = copy.deepcopy(dataset_dict)
- # In DatasetMapperTTA, there is a pre_tfm transform (resize or no-op) that is
- # added at the beginning of each TransformList. That's '.transforms[0]'.
- dic["transforms"] = TransformList(
- [ret[-1]["transforms"].transforms[0]] + tfms.transforms
- )
- dic["image"] = torch_image
- ret.append(dic)
- return ret
-
-
-class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
- def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
- """
- Args:
- cfg (CfgNode):
- model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
- transform_data (DensePoseTransformData): contains symmetry label
- transforms used for horizontal flip
- tta_mapper (callable): takes a dataset dict and returns a list of
- augmented versions of the dataset dict. Defaults to
- `DatasetMapperTTA(cfg)`.
- batch_size (int): batch the augmented images into this batch size for inference.
- """
- self._transform_data = transform_data.to(model.device)
- super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)
-
- # the implementation follows closely the one from detectron2/modeling
- def _inference_one_image(self, input):
- """
- Args:
- input (dict): one dataset dict with "image" field being a CHW tensor
-
- Returns:
- dict: one output dict
- """
- orig_shape = (input["height"], input["width"])
- # For some reason, resize with uint8 slightly increases box AP but decreases densepose AP
- input["image"] = input["image"].to(torch.uint8)
- augmented_inputs, tfms = self._get_augmented_inputs(input)
- # Detect boxes from all augmented versions
- with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
- # temporarily disable roi heads
- all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
- merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
-
- if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
- # Use the detected boxes to obtain new fields
- augmented_instances = self._rescale_detected_boxes(
- augmented_inputs, merged_instances, tfms
- )
- # run forward on the detected boxes
- outputs = self._batch_inference(augmented_inputs, augmented_instances)
- # Delete now useless variables to avoid being out of memory
- del augmented_inputs, augmented_instances
- # average the predictions
- if self.cfg.MODEL.MASK_ON:
- merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
- if self.cfg.MODEL.DENSEPOSE_ON:
- merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms)
- # postprocess
- merged_instances = detector_postprocess(merged_instances, *orig_shape)
- return {"instances": merged_instances}
- else:
- return {"instances": merged_instances}
-
- def _get_augmented_boxes(self, augmented_inputs, tfms):
- # Heavily based on detectron2/modeling/test_time_augmentation.py
- # Only difference is that RotationTransform is excluded from bbox computation
- # 1: forward with all augmented images
- outputs = self._batch_inference(augmented_inputs)
- # 2: union the results
- all_boxes = []
- all_scores = []
- all_classes = []
- for output, tfm in zip(outputs, tfms):
- # Need to inverse the transforms on boxes, to obtain results on original image
- if not any(isinstance(t, RotationTransform) for t in tfm.transforms):
- # Some transforms can't compute bbox correctly
- pred_boxes = output.pred_boxes.tensor
- original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
- all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))
- all_scores.extend(output.scores)
- all_classes.extend(output.pred_classes)
- all_boxes = torch.cat(all_boxes, dim=0)
- return all_boxes, all_scores, all_classes
-
- def _reduce_pred_densepose(self, outputs, tfms):
- # Should apply inverse transforms on densepose preds.
- # We assume only rotation, resize & flip are used. pred_masks is a scale-invariant
- # representation, so we handle the other ones specially
- for idx, (output, tfm) in enumerate(zip(outputs, tfms)):
- for t in tfm.transforms:
- for attr in ["coarse_segm", "fine_segm", "u", "v"]:
- setattr(
- output.pred_densepose,
- attr,
- _inverse_rotation(
- getattr(output.pred_densepose, attr), output.pred_boxes.tensor, t
- ),
- )
- if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
- output.pred_densepose = HFlipConverter.convert(
- output.pred_densepose, self._transform_data
- )
- self._incremental_avg_dp(outputs[0].pred_densepose, output.pred_densepose, idx)
- return outputs[0].pred_densepose
-
- # incrementally computed average: u_(n + 1) = u_n + (x_(n+1) - u_n) / (n + 1).
- def _incremental_avg_dp(self, avg, new_el, idx):
- for attr in ["coarse_segm", "fine_segm", "u", "v"]:
- setattr(avg, attr, (getattr(avg, attr) * idx + getattr(new_el, attr)) / (idx + 1))
- if idx:
- # Deletion of the > 0 index intermediary values to prevent GPU OOM
- setattr(new_el, attr, None)
- return avg
-
-
-def _inverse_rotation(densepose_attrs, boxes, transform):
- # resample outputs to image size and rotate back the densepose preds
- # on the rotated images to the space of the original image
- if len(boxes) == 0 or not isinstance(transform, RotationTransform):
- return densepose_attrs
- boxes = boxes.int().cpu().numpy()
- wh_boxes = boxes[:, 2:] - boxes[:, :2] # bboxes in the rotated space
- inv_boxes = rotate_box_inverse(transform, boxes).astype(int) # bboxes in original image
- wh_diff = (inv_boxes[:, 2:] - inv_boxes[:, :2] - wh_boxes) // 2 # diff between new/old bboxes
- rotation_matrix = torch.tensor([transform.rm_image]).to(device=densepose_attrs.device).float()
- rotation_matrix[:, :, -1] = 0
- # To apply grid_sample for rotation, we need to have enough space to fit the original and
- # rotated bboxes. l_bds and r_bds are the left/right bounds that will be used to
- # crop the difference once the rotation is done
- l_bds = np.maximum(0, -wh_diff)
- for i in range(len(densepose_attrs)):
- if min(wh_boxes[i]) <= 0:
- continue
- densepose_attr = densepose_attrs[[i]].clone()
- # 1. Interpolate densepose attribute to size of the rotated bbox
- densepose_attr = F.interpolate(densepose_attr, wh_boxes[i].tolist()[::-1], mode="bilinear")
- # 2. Pad the interpolated attribute so it has room for the original + rotated bbox
- densepose_attr = F.pad(densepose_attr, tuple(np.repeat(np.maximum(0, wh_diff[i]), 2)))
- # 3. Compute rotation grid and transform
- grid = F.affine_grid(rotation_matrix, size=densepose_attr.shape)
- densepose_attr = F.grid_sample(densepose_attr, grid)
- # 4. Compute right bounds and crop the densepose_attr to the size of the original bbox
- r_bds = densepose_attr.shape[2:][::-1] - l_bds[i]
- densepose_attr = densepose_attr[:, :, l_bds[i][1] : r_bds[1], l_bds[i][0] : r_bds[0]]
- if min(densepose_attr.shape) > 0:
- # Interpolate back to the original size of the densepose attribute
- densepose_attr = F.interpolate(
- densepose_attr, densepose_attrs.shape[-2:], mode="bilinear"
- )
- # Adding a very small probability to the background class to fill padded zones
- densepose_attr[:, 0] += 1e-10
- densepose_attrs[i] = densepose_attr
- return densepose_attrs
-
-
-def rotate_box_inverse(rot_tfm, rotated_box):
- """
- rotated_box is a N * 4 array of [x0, y0, x1, y1] boxes
- When a bbox is rotated, it gets bigger, because we need to surround the tilted bbox
- So when a bbox is rotated then inverse-rotated, it is much bigger than the original
- This function aims to invert the rotation on the box, but also resize it to its original size
- """
- # 1. Compute the inverse rotation of the rotated bboxes (bigger than it )
- invrot_box = rot_tfm.inverse().apply_box(rotated_box)
- h, w = rotated_box[:, 3] - rotated_box[:, 1], rotated_box[:, 2] - rotated_box[:, 0]
- ih, iw = invrot_box[:, 3] - invrot_box[:, 1], invrot_box[:, 2] - invrot_box[:, 0]
- assert 2 * rot_tfm.abs_sin**2 != 1, "45 degrees angle can't be inverted"
- # 2. Inverse the corresponding computation in the rotation transform
- # to get the original height/width of the rotated boxes
- orig_h = (h * rot_tfm.abs_cos - w * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2)
- orig_w = (w * rot_tfm.abs_cos - h * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2)
- # 3. Resize the inverse-rotated bboxes to their original size
- invrot_box[:, 0] += (iw - orig_w) / 2
- invrot_box[:, 1] += (ih - orig_h) / 2
- invrot_box[:, 2] -= (iw - orig_w) / 2
- invrot_box[:, 3] -= (ih - orig_h) / 2
-
- return invrot_box
diff --git a/spaces/cc1234/stashface/app.py b/spaces/cc1234/stashface/app.py
deleted file mode 100644
index 16b91a8373be5d84ee9be6824daaccdb9b233ea7..0000000000000000000000000000000000000000
--- a/spaces/cc1234/stashface/app.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import os
-import json
-import math
-import base64
-
-os.environ["DEEPFACE_HOME"] = "."
-
-import pyzipper
-import numpy as np
-import gradio as gr
-from annoy import AnnoyIndex
-from deepface import DeepFace
-
-
-index = AnnoyIndex(512, "euclidean")
-index.load(f"face.db")
-
-ANNOY_INDEX = json.load(open(f"face.json"))
-
-with pyzipper.AESZipFile('persons.zip') as zf:
- password = os.getenv("VISAGE_KEY","").encode('ascii')
- zf.setpassword(password)
- PERFORMER_DB = json.loads(zf.read('performers.json'))
-
-## Prediction functions
-
-
-def image_search_performer(image, threshold=20.0, results=3):
- """Search for a performer in an image
-
- Returns a list of performers with at least following keys:
- - id: the performer's id
- - distance: the distance between the face in the image and the performer's face
- - confidence: a confidence score between 0 and 100
- - hits: the number of times the performer was found in our database
- """
-
- image_array = np.array(image)
-
- face = DeepFace.represent(img_path = image_array, detector_backend='retinaface', model_name='Facenet512', normalization="Facenet2018")[0]['embedding']
- return search_performer(face, threshold, results)
-
-
-
-def image_search_performers(image, threshold=20.0, results=3):
- image_array = np.array(image)
-
- response = []
- faces = DeepFace.represent(img_path = image_array, detector_backend='retinaface', model_name='Facenet512', normalization="Facenet2018")
- # faces = DeepFace.represent(img_path = image_array, detector_backend='yolov8', model_name='SFace')
- for face in faces:
- embedding = face['embedding']
- area = face['facial_area']
- confidence = face['face_confidence']
- cimage = image.crop((area['x'], area['y'], area['x'] + area['w'], area['y'] + area['h']))
- # return image base64 encoded for display
- with open('temp.jpg', 'wb') as f:
- cimage.save(f, format='JPEG')
- im_b64 = base64.b64encode(open('temp.jpg', 'rb').read())
-
- response.append({
- 'image': im_b64.decode('ascii'),
- 'confidence': confidence,
- 'performers': search_performer(embedding, threshold, results)
- })
-
- return response
-
-
-def vector_search_performer(vector_json, threshold=20.0, results=3):
- """Search for a performer from a vector
-
- The vector should be created with Deepface and should be a 512 vector.
-
- For best results use the following settings:
- - detector_backend: retinaface
- - model: Facenet512
- - normalization: Facenet2018
-
- Returns a list of performers with at least following keys:
- - id: the performer's id
- - distance: the distance between the face in the image and the performer's face
- - confidence: a confidence score between 0 and 100
- - hits: the number of times the performer was found in our database
- """
-
- vector = np.array(json.loads(vector_json))
- return search_performer(vector, threshold, results)
-
-
-def search_performer(vector, threshold=20.0, results=3):
- threshold = threshold or 20.0
- results = results or 3
-
- ids, distances = index.get_nns_by_vector(
- vector, 50, search_k=10000, include_distances=True
- )
- persons = {}
- for p, distance in zip(ids, distances):
- id = ANNOY_INDEX[p]
- if id in persons:
- persons[id]["hits"] += 1
- persons[id]["distance"] -= 0.5
- persons[id]["confidence"] = normalize_confidence_from_distance(persons[id]["distance"], threshold)
- continue
-
- persons[id] = {
- "id": id,
- "distance": round(distance, 2),
- "confidence": normalize_confidence_from_distance(distance, threshold),
- "hits": 1,
- }
-
- if id in PERFORMER_DB:
- persons[id].update(PERFORMER_DB.get(id))
-
- persons = sorted(persons.values(), key=lambda x: x["distance"])
- # persons = [p for p in persons if p["distance"] < threshold]
- return persons[:results]
-
-
-def normalize_confidence_from_distance(distance, threshold=20.0):
- """Normalize confidence to 0-100 scale"""
- confidence = face_distance_to_conf(distance, threshold)
- return int(((confidence - 0.0) / (1.0 - 0.0)) * (100.0 - 0.0) + 0.0)
-
-
-def face_distance_to_conf(face_distance, face_match_threshold=20.0):
- """Using a face distance, calculate a similarity confidence value"""
- if face_distance > face_match_threshold:
- # The face is far away, so give it a low confidence
- range = (1.0 - face_match_threshold)
- linear_val = (1.0 - face_distance) / (range * 2.0)
- return linear_val
- else:
- # The face is close, so give it a high confidence
- range = face_match_threshold
- linear_val = 1.0 - (face_distance / (range * 2.0))
- # But adjust this value by a curve so that we don't get a linear
- # transition from close to far. We want it to be more confident
- # the closer it is.
- return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))
-
-
-image_search = gr.Interface(
- fn=image_search_performer,
- inputs=[
- gr.components.Image(),
- gr.components.Slider(label="threshold",minimum=0.0, maximum=30.0, value=20.0),
- gr.components.Slider(label="results", minimum=0, maximum=50, value=3, step=1),
- ],
- outputs=gr.outputs.JSON(label=""),
- title="Who is in the photo?",
- description="Upload an image of a person and we'll tell you who it is.",
-)
-
-image_search_multiple = gr.Interface(
- fn=image_search_performers,
- inputs=[
- gr.components.Image(type='pil'),
- gr.components.Slider(label="threshold",minimum=0.0, maximum=30.0, value=20.0),
- gr.components.Slider(label="results", minimum=0, maximum=50, value=3, step=1),
- ],
- outputs=gr.outputs.JSON(label=""),
- title="Who is in the photo?",
- description="Upload an image of a person(s) and we'll tell you who it is.",
-)
-
-vector_search = gr.Interface(
- fn=vector_search_performer,
- inputs=[
- gr.components.Textbox(),
- gr.components.Slider(label="threshold",minimum=0.0, maximum=30.0, value=20.0),
- gr.components.Slider(label="results", minimum=0, maximum=50, value=3, step=1),
- ],
- outputs=gr.outputs.JSON(label=""),
- title="Who is in the photo?",
- description="512 vector created with deepface of a person and we'll tell you who it is.",
-)
-
-gr.TabbedInterface([image_search, image_search_multiple, vector_search]).launch(enable_queue=True, server_name="0.0.0.0")
diff --git a/spaces/ccolas/TastyPiano/checkpoints/music_representation/sentence_embedding/smallbert_b256_r128_1/best_model/README.md b/spaces/ccolas/TastyPiano/checkpoints/music_representation/sentence_embedding/smallbert_b256_r128_1/best_model/README.md
deleted file mode 100644
index 4888c6e09df2c91303cb3aae00bd1bc308f37d1f..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/checkpoints/music_representation/sentence_embedding/smallbert_b256_r128_1/best_model/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-pipeline_tag: sentence-similarity
-tags:
-- sentence-transformers
-- feature-extraction
-- sentence-similarity
----
-
-# {MODEL_NAME}
-
-This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 128 dimensional dense vector space and can be used for tasks like clustering or semantic search.
-
-
-
-## Usage (Sentence-Transformers)
-
-Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
-
-```
-pip install -U sentence-transformers
-```
-
-Then you can use the model like this:
-
-```python
-from sentence_transformers import SentenceTransformer
-sentences = ["This is an example sentence", "Each sentence is converted"]
-
-model = SentenceTransformer('{MODEL_NAME}')
-embeddings = model.encode(sentences)
-print(embeddings)
-```
-
-
-
-## Evaluation Results
-
-
-
-For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
-
-
-## Training
-The model was trained with the parameters:
-
-**DataLoader**:
-
-`torch.utils.data.dataloader.DataLoader` of length 15725 with parameters:
-```
-{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
-```
-
-**Loss**:
-
-`sentence_transfo.sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
- ```
- {'scale': 20.0, 'similarity_fct': 'cos_sim'}
- ```
-
-Parameters of the fit()-Method:
-```
-{
- "epochs": 200,
- "evaluate_every_steps": 1000,
- "evaluator": "NoneType",
- "max_grad_norm": 1,
- "nb_eval_steps": 100,
- "optimizer_class": "
",
- "optimizer_params": {
- "lr": 2e-05
- },
- "scheduler": "WarmupLinear",
- "steps_per_epoch": null,
- "warmup_steps": 10000,
- "weight_decay": 0.01
-}
-```
-
-
-## Full Model Architecture
-```
-SentenceTransformer(
- (0): Transformer({'max_seq_length': 510, 'do_lower_case': False}) with Transformer model: BertModel
- (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
- (2): Dense({'in_features': 768, 'out_features': 128, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
-)
-```
-
-## Citing & Authors
-
-
\ No newline at end of file
diff --git a/spaces/ccolas/TastyPiano/src/cocktails/utilities/glass_and_volume_utilities.py b/spaces/ccolas/TastyPiano/src/cocktails/utilities/glass_and_volume_utilities.py
deleted file mode 100644
index f84927fe5cc74c8198752b858661afae2805a576..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/src/cocktails/utilities/glass_and_volume_utilities.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-glass_conversion = {'coupe':'coupe',
- 'martini': 'martini',
- 'collins': 'collins',
- 'oldfashion': 'oldfashion',
- 'Coupe glass': 'coupe',
- 'Old-fashioned glass': 'oldfashion',
- 'Martini glass': 'martini',
- 'Nick & Nora glass': 'coupe',
- 'Julep tin': 'oldfashion',
- 'Collins or Pineapple shell glass': 'collins',
- 'Collins glass': 'collins',
- 'Rocks glass': 'oldfashion',
- 'Highball (max 10oz/300ml)': 'collins',
- 'Wine glass': 'coupe',
- 'Flute glass': 'coupe',
- 'Double old-fashioned': 'oldfashion',
- 'Copa glass': 'coupe',
- 'Toddy glass': 'oldfashion',
- 'Sling glass': 'collins',
- 'Goblet glass': 'oldfashion',
- 'Fizz or Highball (8oz to 10oz)': 'collins',
- 'Copper mug or Collins glass': 'collins',
- 'Tiki mug or collins': 'collins',
- 'Snifter glass': 'oldfashion',
- 'Coconut shell or Collins glass': 'collins',
- 'Martini (large 10oz) glass': 'martini',
- 'Hurricane glass': 'collins',
- 'Absinthe glass or old-fashioned glass': 'oldfashion'
- }
-glass_volume = dict(coupe = 200,
- collins=350,
- martini=200,
- oldfashion=320)
-assert set(glass_conversion.values()) == set(glass_volume.keys())
-
-volume_ranges = dict(stirred=(90, 97),
- built=(70, 75),
- shaken=(98, 112),
- egg_shaken=(130, 143),
- carbonated=(150, 150))
\ No newline at end of file
diff --git a/spaces/chansung/llama2-with-gradio-chat/README.md b/spaces/chansung/llama2-with-gradio-chat/README.md
deleted file mode 100644
index 0ffb9ef296597248dcb3548eac563406ec3d8a86..0000000000000000000000000000000000000000
--- a/spaces/chansung/llama2-with-gradio-chat/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Llama2 With Gradio Chat
-emoji: 🤗💬
-colorFrom: pink
-colorTo: green
-sdk: gradio
-sdk_version: 3.41.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/chasemcdo/hf_localai/entrypoint.sh b/spaces/chasemcdo/hf_localai/entrypoint.sh
deleted file mode 100644
index f8d81bce5810dd528b176104b3bd8454bec1165e..0000000000000000000000000000000000000000
--- a/spaces/chasemcdo/hf_localai/entrypoint.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-
-cd /build
-
-if [ "$REBUILD" != "false" ]; then
- ESPEAK_DATA=/build/lib/Linux-$(uname -m)/piper_phonemize/lib/espeak-ng-data make build -j${THREADS:-1}
-fi
-
-./local-ai "$@"
diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/train_ms_cluster.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/train_ms_cluster.py
deleted file mode 100644
index b06f3362dcd0c4e957876abf36ac81be762f849d..0000000000000000000000000000000000000000
--- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/train_ms_cluster.py
+++ /dev/null
@@ -1,608 +0,0 @@
-# flake8: noqa: E402
-
-import os
-
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-from tqdm import tqdm
-import logging
-
-from transformers import get_linear_schedule_with_warmup
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-import commons
-import utils
-from data_utils import (
- TextAudioSpeakerLoader,
- TextAudioSpeakerCollate,
- DistributedBucketSampler,
-)
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
- DurationDiscriminator,
-)
-from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = (
- True # If encontered training problem,please try to disable TF32.
-)
-torch.set_float32_matmul_precision("medium")
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.sdp_kernel("flash")
-torch.backends.cuda.enable_flash_sdp(True)
-torch.backends.cuda.enable_mem_efficient_sdp(
- True
-) # Not available if torch version is lower than 2.0
-torch.backends.cuda.enable_math_sdp(True)
-global_step = 0
-
-
-def run():
- dist.init_process_group(
- backend="nccl",
- init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl.
- ) # Use torchrun instead of mp.spawn
- rank = dist.get_rank()
- n_gpus = dist.get_world_size()
- hps = utils.get_hparams()
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
- train_sampler = DistributedBucketSampler(
- train_dataset,
- hps.train.batch_size,
- [32, 300, 400, 500, 600, 700, 800, 900, 1000],
- num_replicas=n_gpus,
- rank=rank,
- shuffle=True,
- )
- collate_fn = TextAudioSpeakerCollate()
- train_loader = DataLoader(
- train_dataset,
- num_workers=8,
- shuffle=False,
- pin_memory=True,
- collate_fn=collate_fn,
- batch_sampler=train_sampler,
- persistent_workers=True,
- prefetch_factor=4,
- ) # DataLoader config could be adjusted.
- if rank == 0:
- eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
- eval_loader = DataLoader(
- eval_dataset,
- num_workers=0,
- shuffle=False,
- batch_size=1,
- pin_memory=True,
- drop_last=False,
- collate_fn=collate_fn,
- )
- if (
- "use_noise_scaled_mas" in hps.model.keys()
- and hps.model.use_noise_scaled_mas is True
- ):
- print("Using noise scaled MAS for VITS2")
- mas_noise_scale_initial = 0.01
- noise_scale_delta = 2e-6
- else:
- print("Using normal MAS for VITS1")
- mas_noise_scale_initial = 0.0
- noise_scale_delta = 0.0
- if (
- "use_duration_discriminator" in hps.model.keys()
- and hps.model.use_duration_discriminator is True
- ):
- print("Using duration discriminator for VITS2")
- net_dur_disc = DurationDiscriminator(
- hps.model.hidden_channels,
- hps.model.hidden_channels,
- 3,
- 0.1,
- gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
- ).cuda(rank)
- if (
- "use_spk_conditioned_encoder" in hps.model.keys()
- and hps.model.use_spk_conditioned_encoder is True
- ):
- if hps.data.n_speakers == 0:
- raise ValueError(
- "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
- )
- else:
- print("Using normal encoder for VITS1")
-
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- mas_noise_scale_initial=mas_noise_scale_initial,
- noise_scale_delta=noise_scale_delta,
- **hps.model,
- ).cuda(rank)
-
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- filter(lambda p: p.requires_grad, net_g.parameters()),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps,
- )
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps,
- )
- if net_dur_disc is not None:
- optim_dur_disc = torch.optim.AdamW(
- net_dur_disc.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps,
- )
- else:
- optim_dur_disc = None
- net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
- if net_dur_disc is not None:
- net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
- try:
- if net_dur_disc is not None:
- _, _, dur_resume_lr, epoch_str = utils.load_checkpoint(
- utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"),
- net_dur_disc,
- None,
- skip_optimizer=hps.train.skip_optimizer
- if "skip_optimizer" in hps.train
- else True,
- )
- _, _, g_resume_lr, epoch_str = utils.load_checkpoint(
- utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"),
- net_g,
- None,
- skip_optimizer=hps.train.skip_optimizer
- if "skip_optimizer" in hps.train
- else True,
- )
- _, _, d_resume_lr, epoch_str = utils.load_checkpoint(
- utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"),
- net_d,
- None,
- skip_optimizer=hps.train.skip_optimizer
- if "skip_optimizer" in hps.train
- else True,
- )
- # if not optim_g.param_groups[0].get("initial_lr"):
- # optim_g.param_groups[0]["initial_lr"] = g_resume_lr
- # if not optim_d.param_groups[0].get("initial_lr"):
- # optim_d.param_groups[0]["initial_lr"] = d_resume_lr
- # if not optim_dur_disc.param_groups[0].get("initial_lr"):
- # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr
-
- epoch_str = 1
- global_step = (epoch_str - 1) * len(train_loader)
- except Exception as e:
- print(e)
- epoch_str = 1
- global_step = 0
-
- training_steps = len(train_loader) * hps.train.epochs
- warmup_steps = training_steps * hps.train.warmup_ratio
- if rank == 0:
- print(f"Total training steps {len(train_loader)} * {hps.train.epochs} = {training_steps}")
- print(f"Warmup steps {warmup_steps}")
- scheduler_g = get_linear_schedule_with_warmup(optim_g, warmup_steps, training_steps)
- # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
- # optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
- # )
- scheduler_d = get_linear_schedule_with_warmup(optim_d, warmup_steps, training_steps)
- # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
- # optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
- # )
- if net_dur_disc is not None:
- if not optim_dur_disc.param_groups[0].get("initial_lr"):
- optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr
- scheduler_dur_disc = get_linear_schedule_with_warmup(optim_dur_disc, warmup_steps, training_steps)
- # scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(
- # optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
- # )
- else:
- scheduler_dur_disc = None
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(
- rank,
- epoch,
- hps,
- [net_g, net_d, net_dur_disc],
- [optim_g, optim_d, optim_dur_disc],
- [scheduler_g, scheduler_d, scheduler_dur_disc],
- scaler,
- [train_loader, eval_loader],
- logger,
- [writer, writer_eval],
- )
- else:
- train_and_evaluate(
- rank,
- epoch,
- hps,
- [net_g, net_d, net_dur_disc],
- [optim_g, optim_d, optim_dur_disc],
- [scheduler_g, scheduler_d, scheduler_dur_disc],
- scaler,
- [train_loader, None],
- None,
- None,
- )
- # scheduler_g.step()
- # scheduler_d.step()
- # if net_dur_disc is not None:
- # scheduler_dur_disc.step()
-
-
-def train_and_evaluate(
- rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers
-):
- net_g, net_d, net_dur_disc = nets
- optim_g, optim_d, optim_dur_disc = optims
- scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- if net_dur_disc is not None:
- net_dur_disc.train()
- for batch_idx, (
- x,
- x_lengths,
- spec,
- spec_lengths,
- y,
- y_lengths,
- speakers,
- tone,
- language,
- bert,
- ja_bert,
- ) in enumerate(tqdm(train_loader)):
- if net_g.module.use_noise_scaled_mas:
- current_mas_noise_scale = (
- net_g.module.mas_noise_scale_initial
- - net_g.module.noise_scale_delta * global_step
- )
- net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
- x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(
- rank, non_blocking=True
- )
- spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
- rank, non_blocking=True
- )
- y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
- rank, non_blocking=True
- )
- speakers = speakers.cuda(rank, non_blocking=True)
- tone = tone.cuda(rank, non_blocking=True)
- language = language.cuda(rank, non_blocking=True)
- bert = bert.cuda(rank, non_blocking=True)
- ja_bert = ja_bert.cuda(rank, non_blocking=True)
-
- with autocast(enabled=hps.train.fp16_run):
- (
- y_hat,
- l_length,
- attn,
- ids_slice,
- x_mask,
- z_mask,
- (z, z_p, m_p, logs_p, m_q, logs_q),
- (hidden_x, logw, logw_),
- ) = net_g(
- x,
- x_lengths,
- spec,
- spec_lengths,
- speakers,
- tone,
- language,
- bert,
- ja_bert,
- )
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax,
- )
- y_mel = commons.slice_segments(
- mel, ids_slice, hps.train.segment_size // hps.data.hop_length
- )
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax,
- )
-
- y = commons.slice_segments(
- y, ids_slice * hps.data.hop_length, hps.train.segment_size
- ) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
- y_d_hat_r, y_d_hat_g
- )
- loss_disc_all = loss_disc
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(
- hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()
- )
- with autocast(enabled=False):
- # TODO: I think need to mean using the mask, but for now, just mean all
- (
- loss_dur_disc,
- losses_dur_disc_r,
- losses_dur_disc_g,
- ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
- loss_dur_disc_all = loss_dur_disc
- optim_dur_disc.zero_grad()
- scaler.scale(loss_dur_disc_all).backward()
- scaler.unscale_(optim_dur_disc)
- commons.clip_grad_value_(net_dur_disc.parameters(), hps.train.clipping_grad_norm)
- scaler.step(optim_dur_disc)
- scheduler_dur_disc.step()
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), hps.train.clipping_grad_norm)
- scaler.step(optim_d)
- scheduler_d.step()
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
- with autocast(enabled=False):
- loss_dur = torch.sum(l_length.float())
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
- if net_dur_disc is not None:
- loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
- loss_gen_all += loss_dur_gen
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), hps.train.clipping_grad_norm)
- scaler.step(optim_g)
- scaler.update()
- scheduler_g.step()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]["lr"]
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
- logger.info(
- "Train Epoch: {} [{:.0f}%]".format(
- epoch, 100.0 * batch_idx / len(train_loader)
- )
- )
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {
- "loss/g/total": loss_gen_all,
- "loss/d/total": loss_disc_all,
- "learning_rate": lr,
- "grad_norm_d": grad_norm_d,
- "grad_norm_g": grad_norm_g,
- }
- scalar_dict.update(
- {
- "loss/g/fm": loss_fm,
- "loss/g/mel": loss_mel,
- "loss/g/dur": loss_dur,
- "loss/g/kl": loss_kl,
- }
- )
- scalar_dict.update(
- {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}
- )
- scalar_dict.update(
- {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}
- )
- scalar_dict.update(
- {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}
- )
-
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(
- y_mel[0].data.cpu().numpy()
- ),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(
- y_hat_mel[0].data.cpu().numpy()
- ),
- "all/mel": utils.plot_spectrogram_to_numpy(
- mel[0].data.cpu().numpy()
- ),
- "all/attn": utils.plot_alignment_to_numpy(
- attn[0, 0].data.cpu().numpy()
- ),
- }
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict,
- )
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(
- net_g,
- optim_g,
- hps.train.learning_rate,
- epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)),
- )
- utils.save_checkpoint(
- net_d,
- optim_d,
- hps.train.learning_rate,
- epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)),
- )
- if net_dur_disc is not None:
- utils.save_checkpoint(
- net_dur_disc,
- optim_dur_disc,
- hps.train.learning_rate,
- epoch,
- os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)),
- )
- keep_ckpts = getattr(hps.train, "keep_ckpts", 5)
- if keep_ckpts > 0:
- utils.clean_checkpoints(
- path_to_models=hps.model_dir,
- n_ckpts_to_keep=keep_ckpts,
- sort_by_time=True,
- )
-
- global_step += 1
-
- if rank == 0:
- logger.info("====> Epoch: {}".format(epoch))
-
-
def evaluate(hps, generator, eval_loader, writer_eval):
    """Run one evaluation pass and log spectrogram images / audio to TensorBoard.

    Args:
        hps: hyper-parameter namespace (uses hps.data.* audio settings).
        generator: DDP-wrapped synthesis network; `.module.infer` is called, so a
            plain (non-DDP) module would need unwrapping first.
        eval_loader: dataloader yielding the same 11-tuple batches as training.
        writer_eval: TensorBoard SummaryWriter for the eval run.

    Side effects: switches the generator to eval mode for the pass and back to
    train mode at the end; writes to `writer_eval` via utils.summarize.
    """
    generator.eval()
    image_dict = {}
    audio_dict = {}
    print("Evaluating ...")
    with torch.no_grad():
        for batch_idx, (
            x,
            x_lengths,
            spec,
            spec_lengths,
            y,
            y_lengths,
            speakers,
            tone,
            language,
            bert,
            ja_bert,
        ) in enumerate(tqdm(eval_loader)):
            # Move the whole batch to the default CUDA device (single-GPU eval).
            x, x_lengths = x.cuda(), x_lengths.cuda()
            spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
            y, y_lengths = y.cuda(), y_lengths.cuda()
            speakers = speakers.cuda()
            bert = bert.cuda()
            ja_bert = ja_bert.cuda()
            tone = tone.cuda()
            language = language.cuda()
            # Synthesize twice: with the stochastic duration predictor (sdp_ratio=1)
            # and with the deterministic one (sdp_ratio=0).
            for use_sdp in [True, False]:
                y_hat, attn, mask, *_ = generator.module.infer(
                    x,
                    x_lengths,
                    speakers,
                    tone,
                    language,
                    bert,
                    ja_bert,
                    y=spec,
                    max_len=1000,
                    sdp_ratio=0.0 if not use_sdp else 1.0,
                )
                # Frame count per item times hop length -> sample count.
                y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length

                mel = spec_to_mel_torch(
                    spec,
                    hps.data.filter_length,
                    hps.data.n_mel_channels,
                    hps.data.sampling_rate,
                    hps.data.mel_fmin,
                    hps.data.mel_fmax,
                )
                y_hat_mel = mel_spectrogram_torch(
                    y_hat.squeeze(1).float(),
                    hps.data.filter_length,
                    hps.data.n_mel_channels,
                    hps.data.sampling_rate,
                    hps.data.hop_length,
                    hps.data.win_length,
                    hps.data.mel_fmin,
                    hps.data.mel_fmax,
                )
                # NOTE(review): the image key omits use_sdp, so the second pass
                # overwrites the first pass's generated-mel image — confirm intended.
                image_dict.update(
                    {
                        f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(
                            y_hat_mel[0].cpu().numpy()
                        )
                    }
                )
                audio_dict.update(
                    {
                        f"gen/audio_{batch_idx}_{use_sdp}": y_hat[
                            0, :, : y_hat_lengths[0]
                        ]
                    }
                )
                image_dict.update(
                    {
                        f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(
                            mel[0].cpu().numpy()
                        )
                    }
                )
                audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, : y_lengths[0]]})

    utils.summarize(
        writer=writer_eval,
        global_step=global_step,
        images=image_dict,
        audios=audio_dict,
        audio_sampling_rate=hps.data.sampling_rate,
    )
    generator.train()
-
-
if __name__ == "__main__":
    # CLI entry point: `run` (defined elsewhere in this module) launches training.
    run()
diff --git a/spaces/chilge/Fushimi/vdecoder/hifigan/nvSTFT.py b/spaces/chilge/Fushimi/vdecoder/hifigan/nvSTFT.py
deleted file mode 100644
index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000
--- a/spaces/chilge/Fushimi/vdecoder/hifigan/nvSTFT.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import math
-import os
-os.environ["LRU_CACHE_CAPACITY"] = "3"
-import random
-import torch
-import torch.utils.data
-import numpy as np
-import librosa
-from librosa.util import normalize
-from librosa.filters import mel as librosa_mel_fn
-from scipy.io.wavfile import read
-import soundfile as sf
-
def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
    """Load an audio file into a mono float32 torch tensor scaled to [-1, 1].

    Args:
        full_path: path to the audio file (any format soundfile can decode).
        target_sr: if given, resample the audio to this rate.
        return_empty_on_exception: on decode failure or inf/NaN samples, return
            an empty list instead of raising.

    Returns:
        (data, sampling_rate). NOTE: on the fallback paths `data` is a plain
        empty list, not a tensor — callers must handle both.
    """
    sampling_rate = None
    try:
        # soundfile decodes more formats/bit-depths than scipy.io.wavfile.read.
        data, sampling_rate = sf.read(full_path, always_2d=True)
    except Exception as ex:
        print(f"'{full_path}' failed to load.\nException:")
        print(ex)
        if return_empty_on_exception:
            return [], sampling_rate or target_sr or 32000
        else:
            # Re-wrapped, so the original traceback is lost (kept as-is).
            raise Exception(ex)

    if len(data.shape) > 1:
        # Multi-channel input: keep only the first channel.
        data = data[:, 0]
    assert len(data) > 2  # check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)

    if np.issubdtype(data.dtype, np.integer):  # if audio data is type int
        max_mag = -np.iinfo(data.dtype).min  # maximum magnitude = min possible value of intXX
    else:  # if audio data is type fp32
        max_mag = max(np.amax(data), -np.amin(data))
        # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
        max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0)

    data = torch.FloatTensor(data.astype(np.float32))/max_mag

    # resample would crash with inf/NaN inputs; optionally bail out early instead.
    if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:
        return [], sampling_rate or target_sr or 32000
    if target_sr is not None and sampling_rate != target_sr:
        data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
        sampling_rate = target_sr

    return data, sampling_rate
-
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress a (numpy) spectrogram: log(max(x, clip_val) * C)."""
    floored = np.clip(x, a_min=clip_val, a_max=None)
    return np.log(floored * C)
-
def dynamic_range_decompression(x, C=1):
    """Invert dynamic_range_compression for numpy arrays: exp(x) / C."""
    expanded = np.exp(x)
    return expanded / C
-
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """Log-compress a torch spectrogram: log(clamp(x, min=clip_val) * C)."""
    floored = torch.clamp(x, min=clip_val)
    return torch.log(floored * C)
-
def dynamic_range_decompression_torch(x, C=1):
    """Invert dynamic_range_compression_torch: exp(x) / C."""
    expanded = torch.exp(x)
    return expanded / C
-
class STFT():
    """Mel-spectrogram extractor with per-device caches for the mel filterbank
    and Hann window.

    Caches:
        mel_basis: keyed by "<fmax>_<device>".
        hann_window: keyed by "<device>".
    """

    def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
        # Target sampling rate; __call__ resamples loaded audio to this.
        self.target_sr = sr

        self.n_mels = n_mels
        self.n_fft = n_fft
        self.win_size = win_size
        self.hop_length = hop_length
        self.fmin = fmin
        self.fmax = fmax
        self.clip_val = clip_val
        self.mel_basis = {}
        self.hann_window = {}

    def get_mel(self, y, center=False):
        """Return the log-compressed mel spectrogram of batched waveform `y`.

        Args:
            y: float tensor, shape (batch, samples), values expected in [-1, 1]
               (out-of-range min/max is only printed, not clamped).
            center: passed through to torch.stft.

        Returns:
            Tensor of shape (batch, n_mels, frames).
        """
        sampling_rate = self.target_sr
        n_mels = self.n_mels
        n_fft = self.n_fft
        win_size = self.win_size
        hop_length = self.hop_length
        fmin = self.fmin
        fmax = self.fmax
        clip_val = self.clip_val

        if torch.min(y) < -1.:
            print('min value is ', torch.min(y))
        if torch.max(y) > 1.:
            print('max value is ', torch.max(y))

        # BUG FIX: the cache was probed with `fmax not in self.mel_basis`, but
        # entries are stored under "<fmax>_<device>", so the membership test
        # never matched and the filterbank/window were rebuilt on every call.
        # Probe with the actual cache key instead.
        mel_key = str(fmax) + '_' + str(y.device)
        if mel_key not in self.mel_basis:
            mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
            self.mel_basis[mel_key] = torch.from_numpy(mel).float().to(y.device)
            self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)

        # Reflect-pad so frame centers align with hop boundaries (HiFi-GAN convention).
        y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
        y = y.squeeze(1)

        # NOTE(review): torch.stft without return_complex is deprecated on
        # recent PyTorch; torch>=2.0 may require return_complex=True plus
        # torch.view_as_real — confirm against the deployed torch version.
        spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
                          center=center, pad_mode='reflect', normalized=False, onesided=True)
        # Magnitude with a small epsilon for numerical stability.
        spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
        spec = torch.matmul(self.mel_basis[mel_key], spec)
        spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
        return spec

    def __call__(self, audiopath):
        """Load `audiopath`, resample to target_sr, return mel of shape (n_mels, frames)."""
        audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
        spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
        return spect
-
-stft = STFT()
diff --git a/spaces/chongjie/MCC_slim/util/pos_embed.py b/spaces/chongjie/MCC_slim/util/pos_embed.py
deleted file mode 100644
index 51162ff71e875487d8904021ac33f718f7d7c698..0000000000000000000000000000000000000000
--- a/spaces/chongjie/MCC_slim/util/pos_embed.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# --------------------------------------------------------
-# Position embedding utils
-# --------------------------------------------------------
-
-import numpy as np
-
-import torch
-
-# --------------------------------------------------------
-# 2D sine-cosine position embedding
-# References:
-# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
-# MoCo v3: https://github.com/facebookresearch/moco-v3
-# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    axis = np.arange(grid_size, dtype=np.float32)
    # meshgrid with width varying fastest ("w goes first"), per MoCo v3 / MAE.
    grid = np.stack(np.meshgrid(axis, axis), axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if not cls_token:
        return pos_embed
    # Prepend an all-zeros row as the [CLS] token position.
    cls_row = np.zeros([1, embed_dim])
    return np.concatenate([cls_row, pos_embed], axis=0)
-
-
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Concatenate 1-D sin-cos embeddings of grid[0] (h) and grid[1] (w),
    each taking half the embedding dimensions. Returns (H*W, embed_dim)."""
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
-
-
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D) — first D/2 columns are sines, last D/2 are cosines.
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    # Frequencies follow the Transformer recipe: 1 / 10000^(2i/D).
    freqs = np.arange(half, dtype=np.float32)
    freqs /= embed_dim / 2.
    freqs = 1. / 10000**freqs  # (D/2,)

    flat_pos = pos.reshape(-1)  # (M,)
    angles = np.einsum('m,d->md', flat_pos, freqs)  # (M, D/2), outer product

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
-
def get_1d_sincos_pos_embed_from_grid_torch(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D) — torch version of get_1d_sincos_pos_embed_from_grid,
    computed on pos.device.
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    freqs = torch.arange(half, device=pos.device).float()
    freqs /= embed_dim / 2.
    freqs = 1. / 10000**freqs  # (D/2,)

    flat_pos = pos.reshape(-1)  # (M,)
    angles = torch.einsum('m,d->md', flat_pos, freqs)  # (M, D/2), outer product

    return torch.cat([torch.sin(angles), torch.cos(angles)], axis=1)  # (M, D)
-
-
-
-# --------------------------------------------------------
-# Interpolate position embeddings for high-resolution
-# References:
-# DeiT: https://github.com/facebookresearch/deit
-# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model):
    """Resize a checkpoint's 2-D position embedding to match `model`'s patch grid.

    Mutates checkpoint_model['pos_embed'] in place when grid sizes differ.
    Extra tokens (cls/dist) stay unchanged; only patch-position tokens are
    bicubically interpolated. Assumes a square patch grid on both sides —
    int(sqrt(...)) silently truncates otherwise (TODO confirm callers only
    pass square grids).
    """
    if 'pos_embed' in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model['pos_embed'] = new_pos_embed
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/interpolatable.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/interpolatable.py
deleted file mode 100644
index d5428c2002286b7de284fff89a79f62cd6ebd656..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/interpolatable.py
+++ /dev/null
@@ -1,583 +0,0 @@
-"""
-Tool to find wrong contour order between different masters, and
-other interpolatability (or lack thereof) issues.
-
-Call as:
-$ fonttools varLib.interpolatable font1 font2 ...
-"""
-
-from fontTools.pens.basePen import AbstractPen, BasePen
-from fontTools.pens.pointPen import SegmentToPointPen
-from fontTools.pens.recordingPen import RecordingPen
-from fontTools.pens.statisticsPen import StatisticsPen
-from fontTools.pens.momentsPen import OpenContourError
-from collections import OrderedDict
-import math
-import itertools
-import sys
-
-
-def _rot_list(l, k):
- """Rotate list by k items forward. Ie. item at position 0 will be
- at position k in returned list. Negative k is allowed."""
- n = len(l)
- k %= n
- if not k:
- return l
- return l[n - k :] + l[: n - k]
-
-
class PerContourPen(BasePen):
    """Segment pen that splits a glyph into one sub-pen per contour.

    Each moveTo opens a new instance of the wrapped `Pen` class; finished
    per-contour pens accumulate in `self.value`.
    """

    def __init__(self, Pen, glyphset=None):
        BasePen.__init__(self, glyphset)
        self._glyphset = glyphset
        # Factory for the per-contour recording pens.
        self._Pen = Pen
        # Pen for the contour currently being drawn (None between contours).
        self._pen = None
        self.value = []

    def _moveTo(self, p0):
        # A moveTo always starts a fresh contour.
        self._newItem()
        self._pen.moveTo(p0)

    def _lineTo(self, p1):
        self._pen.lineTo(p1)

    def _qCurveToOne(self, p1, p2):
        self._pen.qCurveTo(p1, p2)

    def _curveToOne(self, p1, p2, p3):
        self._pen.curveTo(p1, p2, p3)

    def _closePath(self):
        self._pen.closePath()
        self._pen = None

    def _endPath(self):
        self._pen.endPath()
        self._pen = None

    def _newItem(self):
        # Create and register the pen that records the next contour.
        self._pen = pen = self._Pen()
        self.value.append(pen)
-
-
class PerContourOrComponentPen(PerContourPen):
    """Like PerContourPen, but each component is also recorded as its own item."""

    def addComponent(self, glyphName, transformation):
        self._newItem()
        self.value[-1].addComponent(glyphName, transformation)
-
-
class RecordingPointPen(BasePen):
    """Minimal point pen: collects (point, is_on_curve) tuples into `self.value`.

    Contour boundaries are not tracked; points accumulate flat across paths.
    """

    def __init__(self):
        self.value = []

    def beginPath(self, identifier=None, **kwargs):
        pass

    def endPath(self) -> None:
        pass

    def addPoint(self, pt, segmentType=None):
        # On-curve points carry a segmentType; off-curve points pass None.
        self.value.append((pt, segmentType is not None))
-
-
-def _vdiff(v0, v1):
- return tuple(b - a for a, b in zip(v0, v1))
-
-
-def _vlen(vec):
- v = 0
- for x in vec:
- v += x * x
- return v
-
-
-def _complex_vlen(vec):
- v = 0
- for x in vec:
- v += abs(x) * abs(x)
- return v
-
-
-def _matching_cost(G, matching):
- return sum(G[i][j] for i, j in enumerate(matching))
-
-
def min_cost_perfect_bipartite_matching(G):
    """Solve the assignment problem on a square cost matrix G.

    Returns (matching, cost), where matching[i] is the column assigned to
    row i. Tries scipy first, then munkres; for n <= 6 falls back to brute
    force over all permutations, otherwise raises.
    """
    n = len(G)

    def cost_of(matching):
        # Total cost of assigning row i to column matching[i].
        return sum(G[i][j] for i, j in enumerate(matching))

    try:
        from scipy.optimize import linear_sum_assignment

        rows, cols = linear_sum_assignment(G)
        assert (rows == list(range(n))).all()
        return list(cols), cost_of(cols)
    except ImportError:
        pass

    try:
        from munkres import Munkres

        cols = [None] * n
        for row, col in Munkres().compute(G):
            cols[row] = col
        return cols, cost_of(cols)
    except ImportError:
        pass

    if n > 6:
        raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")

    # Brute force: scan every permutation, keeping the cheapest seen so far.
    best = None
    best_cost = None
    for perm in itertools.permutations(range(n)):
        cost = cost_of(perm)
        if best_cost is None or cost < best_cost:
            best, best_cost = list(perm), cost
    return best, best_cost
-
-
def test(glyphsets, glyphs=None, names=None, ignore_missing=False):
    """Check a list of masters (glyphsets) for interpolatability problems.

    Args:
        glyphsets: list of glyphset mappings (glyph name -> drawable glyph or None).
        glyphs: optional iterable of glyph names to check; defaults to the union
            of all glyphsets' keys.
        names: optional display names for the masters; defaults to the glyphsets
            themselves.
        ignore_missing: when True, glyphs absent from a sparse master are not
            reported as problems.

    Returns:
        OrderedDict mapping glyph name -> list of problem dicts; each dict has a
        "type" key ("missing", "open_path", "path_count", "node_count",
        "node_incompatibility", "contour_order", "wrong_start_point",
        "math_error") plus type-specific fields.
    """
    if names is None:
        names = glyphsets
    if glyphs is None:
        # `glyphs = glyphsets[0].keys()` is faster, certainly, but doesn't allow for sparse TTFs/OTFs given out of order
        # ... risks the sparse master being the first one, and only processing a subset of the glyphs
        glyphs = {g for glyphset in glyphsets for g in glyphset.keys()}

    hist = []  # NOTE(review): never written to or read — looks vestigial.
    problems = OrderedDict()

    def add_problem(glyphname, problem):
        # Accumulate problems per glyph, preserving insertion order.
        problems.setdefault(glyphname, []).append(problem)

    for glyph_name in glyphs:
        try:
            m0idx = 0
            allVectors = []
            allNodeTypes = []
            allContourIsomorphisms = []
            # Pass 1: extract per-master contour descriptions.
            for glyphset, name in zip(glyphsets, names):
                glyph = glyphset[glyph_name]

                if glyph is None:
                    if not ignore_missing:
                        add_problem(glyph_name, {"type": "missing", "master": name})
                    # Keep the per-master lists index-aligned with `names`.
                    allNodeTypes.append(None)
                    allVectors.append(None)
                    allContourIsomorphisms.append(None)
                    continue

                perContourPen = PerContourOrComponentPen(
                    RecordingPen, glyphset=glyphset
                )
                try:
                    glyph.draw(perContourPen, outputImpliedClosingLine=True)
                except TypeError:
                    # Older draw() implementations don't accept the keyword.
                    glyph.draw(perContourPen)
                contourPens = perContourPen.value
                del perContourPen

                contourVectors = []
                contourIsomorphisms = []
                nodeTypes = []
                allNodeTypes.append(nodeTypes)
                allVectors.append(contourVectors)
                allContourIsomorphisms.append(contourIsomorphisms)
                for ix, contour in enumerate(contourPens):
                    # Sequence of recorded operation names ("moveTo", ...) per contour.
                    nodeVecs = tuple(instruction[0] for instruction in contour.value)
                    nodeTypes.append(nodeVecs)

                    stats = StatisticsPen(glyphset=glyphset)
                    try:
                        contour.replay(stats)
                    except OpenContourError as e:
                        add_problem(
                            glyph_name,
                            {"master": name, "contour": ix, "type": "open_path"},
                        )
                        continue
                    # Coarse geometric fingerprint of the contour, used for
                    # matching contours across masters.
                    size = math.sqrt(abs(stats.area)) * 0.5
                    vector = (
                        int(size),
                        int(stats.meanX),
                        int(stats.meanY),
                        int(stats.stddevX * 2),
                        int(stats.stddevY * 2),
                        int(stats.correlation * size),
                    )
                    contourVectors.append(vector)
                    # print(vector)

                    # Check starting point
                    if nodeVecs[0] == "addComponent":
                        continue
                    assert nodeVecs[0] == "moveTo"
                    assert nodeVecs[-1] in ("closePath", "endPath")
                    points = RecordingPointPen()
                    converter = SegmentToPointPen(points, False)
                    contour.replay(converter)
                    # points.value is a list of pt,bool where bool is true if on-curve and false if off-curve;
                    # now check all rotations and mirror-rotations of the contour and build list of isomorphic
                    # possible starting points.
                    bits = 0
                    for pt, b in points.value:
                        bits = (bits << 1) | b
                    n = len(points.value)
                    mask = (1 << n) - 1
                    isomorphisms = []
                    contourIsomorphisms.append(isomorphisms)
                    for i in range(n):
                        # Rotate the on/off-curve bit pattern by i and keep
                        # rotations that reproduce the original pattern.
                        b = ((bits << i) & mask) | ((bits >> (n - i)))
                        if b == bits:
                            isomorphisms.append(
                                _rot_list([complex(*pt) for pt, bl in points.value], i)
                            )
                    # Add mirrored rotations
                    mirrored = list(reversed(points.value))
                    reversed_bits = 0
                    for pt, b in mirrored:
                        reversed_bits = (reversed_bits << 1) | b
                    for i in range(n):
                        b = ((reversed_bits << i) & mask) | ((reversed_bits >> (n - i)))
                        if b == bits:
                            isomorphisms.append(
                                _rot_list([complex(*pt) for pt, bl in mirrored], i)
                            )

            # Pass 2: compare every other master against the first present one.
            # m0idx should be the index of the first non-None item in allNodeTypes,
            # else give it the first index of None, which is likely 0
            m0idx = allNodeTypes.index(
                next((x for x in allNodeTypes if x is not None), None)
            )
            # m0 is the first non-None item in allNodeTypes, or the first item if all are None
            m0 = allNodeTypes[m0idx]
            for i, m1 in enumerate(allNodeTypes[m0idx + 1 :]):
                if m1 is None:
                    continue
                if len(m0) != len(m1):
                    add_problem(
                        glyph_name,
                        {
                            "type": "path_count",
                            "master_1": names[m0idx],
                            "master_2": names[m0idx + i + 1],
                            "value_1": len(m0),
                            "value_2": len(m1),
                        },
                    )
                if m0 == m1:
                    continue
                for pathIx, (nodes1, nodes2) in enumerate(zip(m0, m1)):
                    if nodes1 == nodes2:
                        continue
                    if len(nodes1) != len(nodes2):
                        add_problem(
                            glyph_name,
                            {
                                "type": "node_count",
                                "path": pathIx,
                                "master_1": names[m0idx],
                                "master_2": names[m0idx + i + 1],
                                "value_1": len(nodes1),
                                "value_2": len(nodes2),
                            },
                        )
                        continue
                    for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)):
                        if n1 != n2:
                            add_problem(
                                glyph_name,
                                {
                                    "type": "node_incompatibility",
                                    "path": pathIx,
                                    "node": nodeIx,
                                    "master_1": names[m0idx],
                                    "master_2": names[m0idx + i + 1],
                                    "value_1": n1,
                                    "value_2": n2,
                                },
                            )
                            continue

            # Contour-order check via minimum-cost matching of contour fingerprints.
            # m0idx should be the index of the first non-None item in allVectors,
            # else give it the first index of None, which is likely 0
            m0idx = allVectors.index(
                next((x for x in allVectors if x is not None), None)
            )
            # m0 is the first non-None item in allVectors, or the first item if all are None
            m0 = allVectors[m0idx]
            for i, m1 in enumerate(allVectors[m0idx + 1 :]):
                if m1 is None:
                    continue
                if len(m0) != len(m1):
                    # We already reported this
                    continue
                if not m0:
                    continue
                costs = [[_vlen(_vdiff(v0, v1)) for v1 in m1] for v0 in m0]
                matching, matching_cost = min_cost_perfect_bipartite_matching(costs)
                identity_matching = list(range(len(m0)))
                identity_cost = sum(costs[i][i] for i in range(len(m0)))
                # Only flag when a reordering is clearly (>=5%) cheaper than identity.
                if (
                    matching != identity_matching
                    and matching_cost < identity_cost * 0.95
                ):
                    add_problem(
                        glyph_name,
                        {
                            "type": "contour_order",
                            "master_1": names[m0idx],
                            "master_2": names[m0idx + i + 1],
                            "value_1": list(range(len(m0))),
                            "value_2": matching,
                        },
                    )
                    break

            # Start-point check: compare against all isomorphic rotations.
            # m0idx should be the index of the first non-None item in allContourIsomorphisms,
            # else give it the first index of None, which is likely 0
            m0idx = allContourIsomorphisms.index(
                next((x for x in allContourIsomorphisms if x is not None), None)
            )
            # m0 is the first non-None item in allContourIsomorphisms, or the first item if all are None
            m0 = allContourIsomorphisms[m0idx]
            for i, m1 in enumerate(allContourIsomorphisms[m0idx + 1 :]):
                if m1 is None:
                    continue
                if len(m0) != len(m1):
                    # We already reported this
                    continue
                if not m0:
                    continue
                for ix, (contour0, contour1) in enumerate(zip(m0, m1)):
                    c0 = contour0[0]
                    costs = [
                        v for v in (_complex_vlen(_vdiff(c0, c1)) for c1 in contour1)
                    ]
                    min_cost = min(costs)
                    first_cost = costs[0]
                    if min_cost < first_cost * 0.95:
                        add_problem(
                            glyph_name,
                            {
                                "type": "wrong_start_point",
                                "contour": ix,
                                "master_1": names[m0idx],
                                "master_2": names[m0idx + i + 1],
                            },
                        )

        except ValueError as e:
            # NOTE(review): `name` is the loop variable from the per-master loop;
            # if the error is raised before that loop binds it, this raises
            # NameError instead — confirm and consider a safer default.
            add_problem(
                glyph_name,
                {"type": "math_error", "master": name, "error": e},
            )
    return problems
-
-
def main(args=None):
    """Test for interpolatability issues between fonts"""
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.interpolatable",
        description=main.__doc__,
    )
    parser.add_argument(
        "--glyphs",
        action="store",
        help="Space-separate name of glyphs to check",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output report in JSON format",
    )
    parser.add_argument(
        "--quiet",
        action="store_true",
        help="Only exit with code 1 or 0, no output",
    )
    parser.add_argument(
        "--ignore-missing",
        action="store_true",
        help="Will not report glyphs missing from sparse masters as errors",
    )
    parser.add_argument(
        "inputs",
        metavar="FILE",
        type=str,
        nargs="+",
        help="Input a single DesignSpace/Glyphs file, or multiple TTF/UFO files",
    )

    args = parser.parse_args(args)

    glyphs = set(args.glyphs.split()) if args.glyphs else None

    from os.path import basename

    fonts = []
    names = []

    # A single designspace/glyphs/variable-TTF input expands into its masters.
    if len(args.inputs) == 1:
        if args.inputs[0].endswith(".designspace"):
            from fontTools.designspaceLib import DesignSpaceDocument

            designspace = DesignSpaceDocument.fromfile(args.inputs[0])
            args.inputs = [master.path for master in designspace.sources]

        elif args.inputs[0].endswith(".glyphs"):
            from glyphsLib import GSFont, to_ufos

            gsfont = GSFont(args.inputs[0])
            fonts.extend(to_ufos(gsfont))
            names = ["%s-%s" % (f.info.familyName, f.info.styleName) for f in fonts]
            args.inputs = []

        elif args.inputs[0].endswith(".ttf"):
            from fontTools.ttLib import TTFont

            font = TTFont(args.inputs[0])
            if "gvar" in font:
                # Is variable font
                gvar = font["gvar"]
                # Gather all "master" locations
                locs = set()
                for variations in gvar.variations.values():
                    for var in variations:
                        loc = []
                        for tag, val in sorted(var.axes.items()):
                            # val is (min, peak, max); the peak defines the master.
                            loc.append((tag, val[1]))
                        locs.add(tuple(loc))
                # Rebuild locs as dictionaries
                new_locs = [{}]
                names.append("()")
                for loc in sorted(locs, key=lambda v: (len(v), v)):
                    names.append(str(loc))
                    new_locs.append(dict(loc))
                locs = new_locs
                del new_locs
                # locs is all master locations now

                for loc in locs:
                    fonts.append(font.getGlyphSet(location=loc, normalized=True))

            args.inputs = []

    # Remaining inputs are individual master files.
    for filename in args.inputs:
        if filename.endswith(".ufo"):
            from fontTools.ufoLib import UFOReader

            fonts.append(UFOReader(filename))
        else:
            from fontTools.ttLib import TTFont

            fonts.append(TTFont(filename))

        names.append(basename(filename).rsplit(".", 1)[0])

    # Materialize each master as a plain {name: glyph} dict so sparse masters
    # can be padded with None entries below.
    glyphsets = []
    for font in fonts:
        if hasattr(font, "getGlyphSet"):
            glyphset = font.getGlyphSet()
        else:
            glyphset = font
        glyphsets.append({k: glyphset[k] for k in glyphset.keys()})

    if not glyphs:
        glyphs = {gn for glyphset in glyphsets for gn in glyphset.keys()}

    # Pad sparse masters: glyphs absent from a master become explicit None.
    for glyphset in glyphsets:
        glyphSetGlyphNames = set(glyphset.keys())
        diff = glyphs - glyphSetGlyphNames
        if diff:
            for gn in diff:
                glyphset[gn] = None

    problems = test(
        glyphsets, glyphs=glyphs, names=names, ignore_missing=args.ignore_missing
    )

    if not args.quiet:
        if args.json:
            import json

            print(json.dumps(problems))
        else:
            for glyph, glyph_problems in problems.items():
                print(f"Glyph {glyph} was not compatible: ")
                for p in glyph_problems:
                    if p["type"] == "missing":
                        print("    Glyph was missing in master %s" % p["master"])
                    if p["type"] == "open_path":
                        print("    Glyph has an open path in master %s" % p["master"])
                    if p["type"] == "path_count":
                        print(
                            "    Path count differs: %i in %s, %i in %s"
                            % (p["value_1"], p["master_1"], p["value_2"], p["master_2"])
                        )
                    if p["type"] == "node_count":
                        print(
                            "    Node count differs in path %i: %i in %s, %i in %s"
                            % (
                                p["path"],
                                p["value_1"],
                                p["master_1"],
                                p["value_2"],
                                p["master_2"],
                            )
                        )
                    if p["type"] == "node_incompatibility":
                        # BUG FIX: was "%o" (octal) for the node index; print decimal.
                        print(
                            "    Node %i incompatible in path %i: %s in %s, %s in %s"
                            % (
                                p["node"],
                                p["path"],
                                p["value_1"],
                                p["master_1"],
                                p["value_2"],
                                p["master_2"],
                            )
                        )
                    if p["type"] == "contour_order":
                        print(
                            "    Contour order differs: %s in %s, %s in %s"
                            % (
                                p["value_1"],
                                p["master_1"],
                                p["value_2"],
                                p["master_2"],
                            )
                        )
                    if p["type"] == "wrong_start_point":
                        print(
                            "    Contour %d start point differs: %s, %s"
                            % (
                                p["contour"],
                                p["master_1"],
                                p["master_2"],
                            )
                        )
                    if p["type"] == "math_error":
                        print(
                            "    Miscellaneous error in %s: %s"
                            % (
                                p["master"],
                                p["error"],
                            )
                        )
    if problems:
        return problems
-
-
if __name__ == "__main__":
    # `sys` is already imported at module top; the previous local re-import
    # was redundant. Exit status 1 signals that problems were found.
    problems = main()
    sys.exit(int(bool(problems)))
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/ops.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/ops.py
deleted file mode 100644
index 42899c20526ff74464383438695a989383ea0935..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/experimental/ops.py
+++ /dev/null
@@ -1 +0,0 @@
-from torch._ops import PyOperator # noqa: F401
diff --git a/spaces/cihyFjudo/fairness-paper-search/The Squat Challenge 16 Week PDF A Beginners Guide to Squatting Exercises.md b/spaces/cihyFjudo/fairness-paper-search/The Squat Challenge 16 Week PDF A Beginners Guide to Squatting Exercises.md
deleted file mode 100644
index 8a1deab56ca8750d6b9b99af8562070ddcd14516..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/The Squat Challenge 16 Week PDF A Beginners Guide to Squatting Exercises.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-The 30 day squat challenge is best suited for beginners who are looking for a relatively easy and fun way to begin building the habit and discipline to exercise, as well as expand their repertoire of squatting exercises.
-the squat challenge 16 week pdf download
DOWNLOAD ✔ https://tinurli.com/2uwhXw
-Throughout the course of the 30 day challenge, 12 different squat variations will be introduced. First they will appear alone, then the will be combined with other squat variations that have already been learned.
-On this day, you will perform an exercise similar to the squat or deadlift, up to a 1-3 rep max. Exercises can include various forms of the box squat, rack pull, deficit deadlift, and goodmorning, performed at a 1-3 rep max. This movement is usually rotated each week. The meet-style lifts are not performed until the meet.
-Download this PDF poster and Visit my YouTube channel: @BullyJuice for access to all of the challenge. Once downloaded, CLICK ON THE PICTURE of workout one and complete. THEN go to the next PDF file and click on the 2nd workout for that day! ENJOY!
-
-An analysis of the difference in ego resiliency before and after participation in the 16-week Pilates program of elderly women, the primary participant of this study, shows that there are statistically significant differences. Self-confidence, communication efficiency, optimistic trait, and anger management, all of the subvariable consisting of the ego resiliency of elderly women who participated in the 16-week Pilates exercise program, have been improved, compared to their condition before the participation in the Pilates exercise program. To sum it up, these elderly women responded better to impending challenges with confidence, and communicated with other people more positively than they did before their participation in the program. Furthermore, their capability to respond to a situation in an optimistic light and their ability to control anger improved after their participation in the program, suggesting that such changes are stemming from the unique characteristics of Pilates, namely, that Pilates can contribute to improve emotional competence as well as physical capability unlike other exercises, and such objective of Pilates has improved the self-confidence, communication efficiency, optimistic trait, and anger management of the participants, all of which are elements of ego resiliency (Campos de Oliveira et al., 2015).
-In addition to stimulating a wide variety of physiological improvements, HIFT aims to improve performance over a broad spectrum of physical demands. Following 16 weeks of HIFT, participants in the present study increased absolute and relative 5RM front squat strength and performance in all workout challenges. These findings are in agreement with those reported by Heinrich and colleagues [11] who compared a HIFT program to traditional military training protocol in military personal and reported greater improvements during a 2-minute push-up test (4.2 ± 5.4 vs 1.3 ± 5.9), 2-mile run (-89.91 ± 70.23 vs -15.33 ± 69.16 seconds), 1RM bench press (13.2 ± 12.1 vs 2.7 ± 11.5 pounds), and flexibility (seat and reach; 0.6 ± 1.3 vs -0.5 ± 1.5 inches) following HIFT. Likewise, Buckley et al. [8] observed greater improvements following a multimodal high-intensity interval training protocol (MM-HIIT) compared to a row high-intensity training protocol (Row-HIIT) in recreationally-active females in muscle power (broad jump; 6%), 1RM strength (back squat; 39%, overhead press; 27%, and deadlift; 18%), and muscle endurance (back squat repetitions to failure at 70% 1RM; 280%). While it is possible that the observed performance changes were the consequence of specific adaptations to the imposed demands of training, these were likely to have been negated by training experience. HIFT protocols typically vary across training facilities and research investigations. However, their design and exercise composition are generally consistent. Thus, individuals with experience should have been relatively familiar with the specific demands of performance tests at baseline, as well as any potential strategies they might use to maximize performance.
-Walking is the most accessible physical activity for everyone, especially through Lockdowns. Here are some fun activities to help motivate the reluctant walkers to get out and about.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-The final set of Haktive challenges
The series has over 120 Haktive challenges with everyone different - have fun with them.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-How many different shapes and balances can you think of?
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-As the bubbles rise the challenges get progressively harder. Have fun with it.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-Some football challenges to get you ready for the re-start of the Premier League
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-What will you do this week? - A variety of challenges and some competitive ones too.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-With everyone advised to stay at home the number of toilet rolls each house accumulates is staggering. So we have created some toilet roll focussed active challenges for you. Have fun.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-The focus for these challenges are eggs. With Easter round the corner these are some fun ideas to challenge them and get them active.
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-If you find yourself stuck at home with the kids, maybe due to poor weather...or perhaps the threat of a global pandemic, then increased physical activity through the below challenges can be a great way to enhance their well being (and yours), and to have some fun together.
..every little bit of activity will help their well being and yours"
Click on the image to the right to download the PDF of challenges, or sign up to our newsletter below to get our next challenge in your inbox
-For all you keen hockey enthusiasts here are some skills and activities you can do at home.
Be adaptable to your skill level and be careful of your surroundings.
Have fun and keep active.
Click on the image to the right to download the PDF of challenge, or sign up to our newsletter below to get our next challenge in your inbox
-Besides resistance training and balance training applied as a single means and the combination thereof, resistance training conducted on unstable surfaces (URT) poses an alternative or complimentary means to improve measures of strength, power and balance. URT combines unstable devices (e.g., Swiss balls, BOSU® balls, wobble boards, etc.) and an external load (e.g., body weight, barbell, dumbbell) within one exercise (e.g., squats on a foam block). Because of the instability-related reduction of force, power production and movement velocity [12, 13] during URT when compared to traditional resistance training on stable surfaces (SRT), it was previously argued that URT lacks sufficient strain to induce adaptive stimuli [14]. Several studies however, investigating muscular activity during the performance of strength exercises on stable and unstable surfaces demonstrated similar or even higher muscle activation in URT as compared to SRT [13, 15]. According to Behm and Colado [16], there are two components to URT: balance and load/resistance. The balance component of URT seems to activate stabilising muscles of the core and trigger stabilising function of prime movers in response to greater postural challenges [16, 17]. In consequence, URT can generate appropriate stress to exceed training thresholds and ensure neuromuscular adaptive processes. For example, Kibele and Behm [11] found superior improvements in the single leg hop test following URT compared to SRT in healthy young adults. In line with the principle of training specificity [18] they concluded that URT induced higher additional balance and stabilising adaptations, which were more prominent in the balance demanding single leg hop test. Yet the feasibility and effectiveness of URT compared to SRT on measures of lower-extremity strength, power and balance is insufficient in older adults.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/api.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/api.py
deleted file mode 100644
index 0ba08e3a50ba6d61e75f3f31772eb4dfdd3f8f05..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/api.py
+++ /dev/null
@@ -1,626 +0,0 @@
-import logging
-from os import PathLike
-from typing import BinaryIO, List, Optional, Set, Union
-
-from .cd import (
- coherence_ratio,
- encoding_languages,
- mb_encoding_languages,
- merge_coherence_ratios,
-)
-from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
-from .md import mess_ratio
-from .models import CharsetMatch, CharsetMatches
-from .utils import (
- any_specified_encoding,
- cut_sequence_chunks,
- iana_name,
- identify_sig_or_bom,
- is_cp_similar,
- is_multi_byte_encoding,
- should_strip_sig_or_bom,
-)
-
-# Will most likely be controversial
-# logging.addLevelName(TRACE, "TRACE")
-logger = logging.getLogger("charset_normalizer")
-explain_handler = logging.StreamHandler()
-explain_handler.setFormatter(
- logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
-)
-
-
-def from_bytes(
- sequences: Union[bytes, bytearray],
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.2,
- cp_isolation: Optional[List[str]] = None,
- cp_exclusion: Optional[List[str]] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
- language_threshold: float = 0.1,
- enable_fallback: bool = True,
-) -> CharsetMatches:
- """
- Given a raw bytes sequence, return the best possibles charset usable to render str objects.
- If there is no results, it is a strong indicator that the source is binary/not text.
- By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
- And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
-
- The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
- but never take it for granted. Can improve the performance.
-
- You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
- purpose.
-
- This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
- By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
- toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
- Custom logging format and handler can be set manually.
- """
-
- if not isinstance(sequences, (bytearray, bytes)):
- raise TypeError(
- "Expected object of type bytes or bytearray, got: {0}".format(
- type(sequences)
- )
- )
-
- if explain:
- previous_logger_level: int = logger.level
- logger.addHandler(explain_handler)
- logger.setLevel(TRACE)
-
- length: int = len(sequences)
-
- if length == 0:
- logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level or logging.WARNING)
- return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
-
- if cp_isolation is not None:
- logger.log(
- TRACE,
- "cp_isolation is set. use this flag for debugging purpose. "
- "limited list of encoding allowed : %s.",
- ", ".join(cp_isolation),
- )
- cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
- else:
- cp_isolation = []
-
- if cp_exclusion is not None:
- logger.log(
- TRACE,
- "cp_exclusion is set. use this flag for debugging purpose. "
- "limited list of encoding excluded : %s.",
- ", ".join(cp_exclusion),
- )
- cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
- else:
- cp_exclusion = []
-
- if length <= (chunk_size * steps):
- logger.log(
- TRACE,
- "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
- steps,
- chunk_size,
- length,
- )
- steps = 1
- chunk_size = length
-
- if steps > 1 and length / steps < chunk_size:
- chunk_size = int(length / steps)
-
- is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
- is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
-
- if is_too_small_sequence:
- logger.log(
- TRACE,
- "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
- length
- ),
- )
- elif is_too_large_sequence:
- logger.log(
- TRACE,
- "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
- length
- ),
- )
-
- prioritized_encodings: List[str] = []
-
- specified_encoding: Optional[str] = (
- any_specified_encoding(sequences) if preemptive_behaviour else None
- )
-
- if specified_encoding is not None:
- prioritized_encodings.append(specified_encoding)
- logger.log(
- TRACE,
- "Detected declarative mark in sequence. Priority +1 given for %s.",
- specified_encoding,
- )
-
- tested: Set[str] = set()
- tested_but_hard_failure: List[str] = []
- tested_but_soft_failure: List[str] = []
-
- fallback_ascii: Optional[CharsetMatch] = None
- fallback_u8: Optional[CharsetMatch] = None
- fallback_specified: Optional[CharsetMatch] = None
-
- results: CharsetMatches = CharsetMatches()
-
- sig_encoding, sig_payload = identify_sig_or_bom(sequences)
-
- if sig_encoding is not None:
- prioritized_encodings.append(sig_encoding)
- logger.log(
- TRACE,
- "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
- len(sig_payload),
- sig_encoding,
- )
-
- prioritized_encodings.append("ascii")
-
- if "utf_8" not in prioritized_encodings:
- prioritized_encodings.append("utf_8")
-
- for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
- if cp_isolation and encoding_iana not in cp_isolation:
- continue
-
- if cp_exclusion and encoding_iana in cp_exclusion:
- continue
-
- if encoding_iana in tested:
- continue
-
- tested.add(encoding_iana)
-
- decoded_payload: Optional[str] = None
- bom_or_sig_available: bool = sig_encoding == encoding_iana
- strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
- encoding_iana
- )
-
- if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
- logger.log(
- TRACE,
- "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
- encoding_iana,
- )
- continue
- if encoding_iana in {"utf_7"} and not bom_or_sig_available:
- logger.log(
- TRACE,
- "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
- encoding_iana,
- )
- continue
-
- try:
- is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
- except (ModuleNotFoundError, ImportError):
- logger.log(
- TRACE,
- "Encoding %s does not provide an IncrementalDecoder",
- encoding_iana,
- )
- continue
-
- try:
- if is_too_large_sequence and is_multi_byte_decoder is False:
- str(
- sequences[: int(50e4)]
- if strip_sig_or_bom is False
- else sequences[len(sig_payload) : int(50e4)],
- encoding=encoding_iana,
- )
- else:
- decoded_payload = str(
- sequences
- if strip_sig_or_bom is False
- else sequences[len(sig_payload) :],
- encoding=encoding_iana,
- )
- except (UnicodeDecodeError, LookupError) as e:
- if not isinstance(e, LookupError):
- logger.log(
- TRACE,
- "Code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- tested_but_hard_failure.append(encoding_iana)
- continue
-
- similar_soft_failure_test: bool = False
-
- for encoding_soft_failed in tested_but_soft_failure:
- if is_cp_similar(encoding_iana, encoding_soft_failed):
- similar_soft_failure_test = True
- break
-
- if similar_soft_failure_test:
- logger.log(
- TRACE,
- "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
- encoding_iana,
- encoding_soft_failed,
- )
- continue
-
- r_ = range(
- 0 if not bom_or_sig_available else len(sig_payload),
- length,
- int(length / steps),
- )
-
- multi_byte_bonus: bool = (
- is_multi_byte_decoder
- and decoded_payload is not None
- and len(decoded_payload) < length
- )
-
- if multi_byte_bonus:
- logger.log(
- TRACE,
- "Code page %s is a multi byte encoding table and it appear that at least one character "
- "was encoded using n-bytes.",
- encoding_iana,
- )
-
- max_chunk_gave_up: int = int(len(r_) / 4)
-
- max_chunk_gave_up = max(max_chunk_gave_up, 2)
- early_stop_count: int = 0
- lazy_str_hard_failure = False
-
- md_chunks: List[str] = []
- md_ratios = []
-
- try:
- for chunk in cut_sequence_chunks(
- sequences,
- encoding_iana,
- r_,
- chunk_size,
- bom_or_sig_available,
- strip_sig_or_bom,
- sig_payload,
- is_multi_byte_decoder,
- decoded_payload,
- ):
- md_chunks.append(chunk)
-
- md_ratios.append(
- mess_ratio(
- chunk,
- threshold,
- explain is True and 1 <= len(cp_isolation) <= 2,
- )
- )
-
- if md_ratios[-1] >= threshold:
- early_stop_count += 1
-
- if (early_stop_count >= max_chunk_gave_up) or (
- bom_or_sig_available and strip_sig_or_bom is False
- ):
- break
- except (
- UnicodeDecodeError
- ) as e: # Lazy str loading may have missed something there
- logger.log(
- TRACE,
- "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- early_stop_count = max_chunk_gave_up
- lazy_str_hard_failure = True
-
- # We might want to check the sequence again with the whole content
- # Only if initial MD tests passes
- if (
- not lazy_str_hard_failure
- and is_too_large_sequence
- and not is_multi_byte_decoder
- ):
- try:
- sequences[int(50e3) :].decode(encoding_iana, errors="strict")
- except UnicodeDecodeError as e:
- logger.log(
- TRACE,
- "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- tested_but_hard_failure.append(encoding_iana)
- continue
-
- mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
- if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
- tested_but_soft_failure.append(encoding_iana)
- logger.log(
- TRACE,
- "%s was excluded because of initial chaos probing. Gave up %i time(s). "
- "Computed mean chaos is %f %%.",
- encoding_iana,
- early_stop_count,
- round(mean_mess_ratio * 100, ndigits=3),
- )
- # Preparing those fallbacks in case we got nothing.
- if (
- enable_fallback
- and encoding_iana in ["ascii", "utf_8", specified_encoding]
- and not lazy_str_hard_failure
- ):
- fallback_entry = CharsetMatch(
- sequences, encoding_iana, threshold, False, [], decoded_payload
- )
- if encoding_iana == specified_encoding:
- fallback_specified = fallback_entry
- elif encoding_iana == "ascii":
- fallback_ascii = fallback_entry
- else:
- fallback_u8 = fallback_entry
- continue
-
- logger.log(
- TRACE,
- "%s passed initial chaos probing. Mean measured chaos is %f %%",
- encoding_iana,
- round(mean_mess_ratio * 100, ndigits=3),
- )
-
- if not is_multi_byte_decoder:
- target_languages: List[str] = encoding_languages(encoding_iana)
- else:
- target_languages = mb_encoding_languages(encoding_iana)
-
- if target_languages:
- logger.log(
- TRACE,
- "{} should target any language(s) of {}".format(
- encoding_iana, str(target_languages)
- ),
- )
-
- cd_ratios = []
-
- # We shall skip the CD when its about ASCII
- # Most of the time its not relevant to run "language-detection" on it.
- if encoding_iana != "ascii":
- for chunk in md_chunks:
- chunk_languages = coherence_ratio(
- chunk,
- language_threshold,
- ",".join(target_languages) if target_languages else None,
- )
-
- cd_ratios.append(chunk_languages)
-
- cd_ratios_merged = merge_coherence_ratios(cd_ratios)
-
- if cd_ratios_merged:
- logger.log(
- TRACE,
- "We detected language {} using {}".format(
- cd_ratios_merged, encoding_iana
- ),
- )
-
- results.append(
- CharsetMatch(
- sequences,
- encoding_iana,
- mean_mess_ratio,
- bom_or_sig_available,
- cd_ratios_merged,
- decoded_payload,
- )
- )
-
- if (
- encoding_iana in [specified_encoding, "ascii", "utf_8"]
- and mean_mess_ratio < 0.1
- ):
- logger.debug(
- "Encoding detection: %s is most likely the one.", encoding_iana
- )
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
- return CharsetMatches([results[encoding_iana]])
-
- if encoding_iana == sig_encoding:
- logger.debug(
- "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
- "the beginning of the sequence.",
- encoding_iana,
- )
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
- return CharsetMatches([results[encoding_iana]])
-
- if len(results) == 0:
- if fallback_u8 or fallback_ascii or fallback_specified:
- logger.log(
- TRACE,
- "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
- )
-
- if fallback_specified:
- logger.debug(
- "Encoding detection: %s will be used as a fallback match",
- fallback_specified.encoding,
- )
- results.append(fallback_specified)
- elif (
- (fallback_u8 and fallback_ascii is None)
- or (
- fallback_u8
- and fallback_ascii
- and fallback_u8.fingerprint != fallback_ascii.fingerprint
- )
- or (fallback_u8 is not None)
- ):
- logger.debug("Encoding detection: utf_8 will be used as a fallback match")
- results.append(fallback_u8)
- elif fallback_ascii:
- logger.debug("Encoding detection: ascii will be used as a fallback match")
- results.append(fallback_ascii)
-
- if results:
- logger.debug(
- "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
- results.best().encoding, # type: ignore
- len(results) - 1,
- )
- else:
- logger.debug("Encoding detection: Unable to determine any suitable charset.")
-
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
-
- return results
-
-
-def from_fp(
- fp: BinaryIO,
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: Optional[List[str]] = None,
- cp_exclusion: Optional[List[str]] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
- language_threshold: float = 0.1,
- enable_fallback: bool = True,
-) -> CharsetMatches:
- """
- Same thing than the function from_bytes but using a file pointer that is already ready.
- Will not close the file pointer.
- """
- return from_bytes(
- fp.read(),
- steps,
- chunk_size,
- threshold,
- cp_isolation,
- cp_exclusion,
- preemptive_behaviour,
- explain,
- language_threshold,
- enable_fallback,
- )
-
-
-def from_path(
- path: Union[str, bytes, PathLike], # type: ignore[type-arg]
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: Optional[List[str]] = None,
- cp_exclusion: Optional[List[str]] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
- language_threshold: float = 0.1,
- enable_fallback: bool = True,
-) -> CharsetMatches:
- """
- Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
- Can raise IOError.
- """
- with open(path, "rb") as fp:
- return from_fp(
- fp,
- steps,
- chunk_size,
- threshold,
- cp_isolation,
- cp_exclusion,
- preemptive_behaviour,
- explain,
- language_threshold,
- enable_fallback,
- )
-
-
-def is_binary(
- fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: Optional[List[str]] = None,
- cp_exclusion: Optional[List[str]] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
- language_threshold: float = 0.1,
- enable_fallback: bool = False,
-) -> bool:
- """
- Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
- Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
- are disabled to be stricter around ASCII-compatible but unlikely to be a string.
- """
- if isinstance(fp_or_path_or_payload, (str, PathLike)):
- guesses = from_path(
- fp_or_path_or_payload,
- steps=steps,
- chunk_size=chunk_size,
- threshold=threshold,
- cp_isolation=cp_isolation,
- cp_exclusion=cp_exclusion,
- preemptive_behaviour=preemptive_behaviour,
- explain=explain,
- language_threshold=language_threshold,
- enable_fallback=enable_fallback,
- )
- elif isinstance(
- fp_or_path_or_payload,
- (
- bytes,
- bytearray,
- ),
- ):
- guesses = from_bytes(
- fp_or_path_or_payload,
- steps=steps,
- chunk_size=chunk_size,
- threshold=threshold,
- cp_isolation=cp_isolation,
- cp_exclusion=cp_exclusion,
- preemptive_behaviour=preemptive_behaviour,
- explain=explain,
- language_threshold=language_threshold,
- enable_fallback=enable_fallback,
- )
- else:
- guesses = from_fp(
- fp_or_path_or_payload,
- steps=steps,
- chunk_size=chunk_size,
- threshold=threshold,
- cp_isolation=cp_isolation,
- cp_exclusion=cp_exclusion,
- preemptive_behaviour=preemptive_behaviour,
- explain=explain,
- language_threshold=language_threshold,
- enable_fallback=enable_fallback,
- )
-
- return not guesses
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/_common.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/_common.py
deleted file mode 100644
index 4eb2659bd2986125fcfb4afea5bae9efc2dcd1a0..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/_common.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Common code used in multiple modules.
-"""
-
-
-class weekday(object):
- __slots__ = ["weekday", "n"]
-
- def __init__(self, weekday, n=None):
- self.weekday = weekday
- self.n = n
-
- def __call__(self, n):
- if n == self.n:
- return self
- else:
- return self.__class__(self.weekday, n)
-
- def __eq__(self, other):
- try:
- if self.weekday != other.weekday or self.n != other.n:
- return False
- except AttributeError:
- return False
- return True
-
- def __hash__(self):
- return hash((
- self.weekday,
- self.n,
- ))
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
- if not self.n:
- return s
- else:
- return "%s(%+d)" % (s, self.n)
-
-# vim:ts=4:sw=4:et
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dca_parser.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dca_parser.c
deleted file mode 100644
index 3148397b7dcb45a80a27fea0f4fe86edb72312c6..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dca_parser.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * DCA parser
- * Copyright (C) 2004 Gildas Bazin
- * Copyright (C) 2004 Benjamin Zores
- * Copyright (C) 2006 Benjamin Larsson
- * Copyright (C) 2007 Konstantin Shishkov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "dca.h"
-#include "dca_core.h"
-#include "dca_exss.h"
-#include "dca_lbr.h"
-#include "dca_syncwords.h"
-#include "get_bits.h"
-#include "parser.h"
-
-typedef struct DCAParseContext {
- ParseContext pc;
- uint32_t lastmarker;
- int size;
- int framesize;
- unsigned int startpos;
- DCAExssParser exss;
- unsigned int sr_code;
-} DCAParseContext;
-
-#define IS_CORE_MARKER(state) \
- (((state & 0xFFFFFFFFF0FF) == (((uint64_t)DCA_SYNCWORD_CORE_14B_LE << 16) | 0xF007)) || \
- ((state & 0xFFFFFFFFFFF0) == (((uint64_t)DCA_SYNCWORD_CORE_14B_BE << 16) | 0x07F0)) || \
- ((state & 0xFFFFFFFF00FC) == (((uint64_t)DCA_SYNCWORD_CORE_LE << 16) | 0x00FC)) || \
- ((state & 0xFFFFFFFFFC00) == (((uint64_t)DCA_SYNCWORD_CORE_BE << 16) | 0xFC00)))
-
-#define IS_EXSS_MARKER(state) ((state & 0xFFFFFFFF) == DCA_SYNCWORD_SUBSTREAM)
-
-#define IS_MARKER(state) (IS_CORE_MARKER(state) || IS_EXSS_MARKER(state))
-
-#define CORE_MARKER(state) ((state >> 16) & 0xFFFFFFFF)
-#define EXSS_MARKER(state) (state & 0xFFFFFFFF)
-
-#define STATE_LE(state) (((state & 0xFF00FF00) >> 8) | ((state & 0x00FF00FF) << 8))
-#define STATE_14(state) (((state & 0x3FFF0000) >> 8) | ((state & 0x00003FFF) >> 6))
-
-#define CORE_FRAMESIZE(state) (((state >> 4) & 0x3FFF) + 1)
-#define EXSS_FRAMESIZE(state) ((state & 0x2000000000) ? \
- ((state >> 5) & 0xFFFFF) + 1 : \
- ((state >> 13) & 0x0FFFF) + 1)
-
-/**
- * Find the end of the current frame in the bitstream.
- * @return the position of the first byte of the next frame, or -1
- */
-static int dca_find_frame_end(DCAParseContext *pc1, const uint8_t *buf,
- int buf_size)
-{
- int start_found, size, i;
- uint64_t state;
- ParseContext *pc = &pc1->pc;
-
- start_found = pc->frame_start_found;
- state = pc->state64;
- size = pc1->size;
-
- i = 0;
- if (!start_found) {
- for (; i < buf_size; i++) {
- size++;
- state = (state << 8) | buf[i];
-
- if (IS_MARKER(state) &&
- (!pc1->lastmarker ||
- pc1->lastmarker == CORE_MARKER(state) ||
- pc1->lastmarker == DCA_SYNCWORD_SUBSTREAM)) {
- if (!pc1->lastmarker)
- pc1->startpos = IS_EXSS_MARKER(state) ? size - 4 : size - 6;
-
- if (IS_EXSS_MARKER(state))
- pc1->lastmarker = EXSS_MARKER(state);
- else
- pc1->lastmarker = CORE_MARKER(state);
-
- start_found = 1;
- size = 0;
-
- i++;
- break;
- }
- }
- }
-
- if (start_found) {
- for (; i < buf_size; i++) {
- size++;
- state = (state << 8) | buf[i];
-
- if (start_found == 1) {
- switch (pc1->lastmarker) {
- case DCA_SYNCWORD_CORE_BE:
- if (size == 2) {
- pc1->framesize = CORE_FRAMESIZE(state);
- start_found = 2;
- }
- break;
- case DCA_SYNCWORD_CORE_LE:
- if (size == 2) {
- pc1->framesize = CORE_FRAMESIZE(STATE_LE(state));
- start_found = 4;
- }
- break;
- case DCA_SYNCWORD_CORE_14B_BE:
- if (size == 4) {
- pc1->framesize = CORE_FRAMESIZE(STATE_14(state));
- start_found = 4;
- }
- break;
- case DCA_SYNCWORD_CORE_14B_LE:
- if (size == 4) {
- pc1->framesize = CORE_FRAMESIZE(STATE_14(STATE_LE(state)));
- start_found = 4;
- }
- break;
- case DCA_SYNCWORD_SUBSTREAM:
- if (size == 6) {
- pc1->framesize = EXSS_FRAMESIZE(state);
- start_found = 4;
- }
- break;
- default:
- av_assert0(0);
- }
- continue;
- }
-
- if (start_found == 2 && IS_EXSS_MARKER(state) &&
- pc1->framesize <= size + 2) {
- pc1->framesize = size + 2;
- start_found = 3;
- continue;
- }
-
- if (start_found == 3) {
- if (size == pc1->framesize + 4) {
- pc1->framesize += EXSS_FRAMESIZE(state);
- start_found = 4;
- }
- continue;
- }
-
- if (pc1->framesize > size)
- continue;
-
- if (IS_MARKER(state) &&
- (pc1->lastmarker == CORE_MARKER(state) ||
- pc1->lastmarker == DCA_SYNCWORD_SUBSTREAM)) {
- pc->frame_start_found = 0;
- pc->state64 = -1;
- pc1->size = 0;
- return IS_EXSS_MARKER(state) ? i - 3 : i - 5;
- }
- }
- }
-
- pc->frame_start_found = start_found;
- pc->state64 = state;
- pc1->size = size;
- return END_NOT_FOUND;
-}
-
-static av_cold int dca_parse_init(AVCodecParserContext *s)
-{
- DCAParseContext *pc1 = s->priv_data;
-
- pc1->lastmarker = 0;
- pc1->sr_code = -1;
- return 0;
-}
-
-static int dca_parse_params(DCAParseContext *pc1, const uint8_t *buf,
- int buf_size, int *duration, int *sample_rate,
- int *profile)
-{
- DCAExssAsset *asset = &pc1->exss.assets[0];
- GetBitContext gb;
- DCACoreFrameHeader h;
- uint8_t hdr[DCA_CORE_FRAME_HEADER_SIZE + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
- int ret, frame_size;
-
- if (buf_size < DCA_CORE_FRAME_HEADER_SIZE)
- return AVERROR_INVALIDDATA;
-
- if (AV_RB32(buf) == DCA_SYNCWORD_SUBSTREAM) {
- if ((ret = ff_dca_exss_parse(&pc1->exss, buf, buf_size)) < 0)
- return ret;
-
- if (asset->extension_mask & DCA_EXSS_LBR) {
- if ((ret = init_get_bits8(&gb, buf + asset->lbr_offset, asset->lbr_size)) < 0)
- return ret;
-
- if (get_bits_long(&gb, 32) != DCA_SYNCWORD_LBR)
- return AVERROR_INVALIDDATA;
-
- switch (get_bits(&gb, 8)) {
- case DCA_LBR_HEADER_DECODER_INIT:
- pc1->sr_code = get_bits(&gb, 8);
- case DCA_LBR_HEADER_SYNC_ONLY:
- break;
- default:
- return AVERROR_INVALIDDATA;
- }
-
- if (pc1->sr_code >= FF_ARRAY_ELEMS(ff_dca_sampling_freqs))
- return AVERROR_INVALIDDATA;
-
- *sample_rate = ff_dca_sampling_freqs[pc1->sr_code];
- *duration = 1024 << ff_dca_freq_ranges[pc1->sr_code];
- *profile = FF_PROFILE_DTS_EXPRESS;
- return 0;
- }
-
- if (asset->extension_mask & DCA_EXSS_XLL) {
- int nsamples_log2;
-
- if ((ret = init_get_bits8(&gb, buf + asset->xll_offset, asset->xll_size)) < 0)
- return ret;
-
- if (get_bits_long(&gb, 32) != DCA_SYNCWORD_XLL)
- return AVERROR_INVALIDDATA;
-
- if (get_bits(&gb, 4))
- return AVERROR_INVALIDDATA;
-
- skip_bits(&gb, 8);
- skip_bits_long(&gb, get_bits(&gb, 5) + 1);
- skip_bits(&gb, 4);
- nsamples_log2 = get_bits(&gb, 4) + get_bits(&gb, 4);
- if (nsamples_log2 > 24)
- return AVERROR_INVALIDDATA;
-
- *sample_rate = asset->max_sample_rate;
- *duration = (1 + (*sample_rate > 96000)) << nsamples_log2;
- *profile = FF_PROFILE_DTS_HD_MA;
- return 0;
- }
-
- return AVERROR_INVALIDDATA;
- }
-
- if ((ret = avpriv_dca_convert_bitstream(buf, DCA_CORE_FRAME_HEADER_SIZE,
- hdr, DCA_CORE_FRAME_HEADER_SIZE)) < 0)
- return ret;
- if (avpriv_dca_parse_core_frame_header(&h, hdr, ret) < 0)
- return AVERROR_INVALIDDATA;
-
- *duration = h.npcmblocks * DCA_PCMBLOCK_SAMPLES;
- *sample_rate = ff_dca_sample_rates[h.sr_code];
- if (*profile != FF_PROFILE_UNKNOWN)
- return 0;
-
- *profile = FF_PROFILE_DTS;
- if (h.ext_audio_present) {
- switch (h.ext_audio_type) {
- case DCA_EXT_AUDIO_XCH:
- case DCA_EXT_AUDIO_XXCH:
- *profile = FF_PROFILE_DTS_ES;
- break;
- case DCA_EXT_AUDIO_X96:
- *profile = FF_PROFILE_DTS_96_24;
- break;
- }
- }
-
- frame_size = FFALIGN(h.frame_size, 4);
- if (buf_size - 4 < frame_size)
- return 0;
-
- buf += frame_size;
- buf_size -= frame_size;
- if (AV_RB32(buf) != DCA_SYNCWORD_SUBSTREAM)
- return 0;
- if (ff_dca_exss_parse(&pc1->exss, buf, buf_size) < 0)
- return 0;
-
- if (asset->extension_mask & DCA_EXSS_XLL)
- *profile = FF_PROFILE_DTS_HD_MA;
- else if (asset->extension_mask & (DCA_EXSS_XBR | DCA_EXSS_XXCH | DCA_EXSS_X96))
- *profile = FF_PROFILE_DTS_HD_HRA;
-
- return 0;
-}
-
-static int dca_parse(AVCodecParserContext *s, AVCodecContext *avctx,
- const uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size)
-{
- DCAParseContext *pc1 = s->priv_data;
- ParseContext *pc = &pc1->pc;
- int next, duration, sample_rate;
-
- if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
- next = buf_size;
- } else {
- next = dca_find_frame_end(pc1, buf, buf_size);
-
- if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
- }
-
- /* skip initial padding */
- if (buf_size > pc1->startpos) {
- buf += pc1->startpos;
- buf_size -= pc1->startpos;
- }
- pc1->startpos = 0;
- }
-
- /* read the duration and sample rate from the frame header */
- if (!dca_parse_params(pc1, buf, buf_size, &duration, &sample_rate, &avctx->profile)) {
- if (!avctx->sample_rate)
- avctx->sample_rate = sample_rate;
- s->duration = av_rescale(duration, avctx->sample_rate, sample_rate);
- } else
- s->duration = 0;
-
- *poutbuf = buf;
- *poutbuf_size = buf_size;
- return next;
-}
-
-const AVCodecParser ff_dca_parser = {
- .codec_ids = { AV_CODEC_ID_DTS },
- .priv_data_size = sizeof(DCAParseContext),
- .parser_init = dca_parse_init,
- .parser_parse = dca_parse,
- .parser_close = ff_parse_close,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegquanttables.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegquanttables.c
deleted file mode 100644
index 2ab50016c5dd8270c722b1d1366d186d2daf3e35..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpegquanttables.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * MJPEG encoder and decoder
- * Copyright (c) 2000, 2001 Fabrice Bellard
- * Copyright (c) 2003 Alex Beregszaszi
- * Copyright (c) 2003-2004 Michael Niedermayer
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * MJPEG quantization tables
- */
-
-#include "jpegquanttables.h"
-
-/* These are the sample quantization tables given in JPEG spec section K.1.
- * The spec says that the values given produce "good" quality, and
- * when divided by 2, "very good" quality.
- */
-const uint8_t ff_mjpeg_std_luminance_quant_tbl[64] = {
- 16, 11, 10, 16, 24, 40, 51, 61,
- 12, 12, 14, 19, 26, 58, 60, 55,
- 14, 13, 16, 24, 40, 57, 69, 56,
- 14, 17, 22, 29, 51, 87, 80, 62,
- 18, 22, 37, 56, 68, 109, 103, 77,
- 24, 35, 55, 64, 81, 104, 113, 92,
- 49, 64, 78, 87, 103, 121, 120, 101,
- 72, 92, 95, 98, 112, 100, 103, 99
-};
-const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64] = {
- 17, 18, 24, 47, 99, 99, 99, 99,
- 18, 21, 26, 66, 99, 99, 99, 99,
- 24, 26, 56, 99, 99, 99, 99, 99,
- 47, 66, 99, 99, 99, 99, 99, 99,
- 99, 99, 99, 99, 99, 99, 99, 99,
- 99, 99, 99, 99, 99, 99, 99, 99,
- 99, 99, 99, 99, 99, 99, 99, 99,
- 99, 99, 99, 99, 99, 99, 99, 99
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download Bihar Board Dummy Admit Card 2023 for Class 10 and 12 Exams.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download Bihar Board Dummy Admit Card 2023 for Class 10 and 12 Exams.md
deleted file mode 100644
index f7f59063fecb8d33ed76e60129d08c4406d08dbd..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Download Bihar Board Dummy Admit Card 2023 for Class 10 and 12 Exams.md
+++ /dev/null
@@ -1,203 +0,0 @@
-
-Dummy Admit Card 10th 2023 Bihar Board Download
-If you are a class 10 student who has registered for the Bihar board exams in 2023, then you must be wondering how to download your dummy admit card. A dummy admit card is a provisional admit card that is issued by the Bihar School Examination Board (BSEB) before the final admit card. It contains your personal and academic details such as name, date of birth, photo, signature, subjects, etc. It is very important to download and check your dummy admit card for any errors or discrepancies as it may affect your final admit card and exam results. In this article, we will tell you how to download your dummy admit card online, how to make corrections in it, when to expect your final admit card and exam date sheet, and what are the important instructions to follow while appearing for the board exams.
- What is a dummy admit card and why is it important?
-A dummy admit card is a provisional admit card issued by the Bihar School Examination Board (BSEB) for class 10 students who have registered for the board exams in 2023.
-A dummy admit card is a temporary document that is generated by BSEB based on the information provided by the students during their registration process. It is not a valid document for appearing in the board exams. It is only meant to give an idea of how your final admit card will look like and what details it will contain.
-dummy admit card 10th 2023 bihar board download
Download Zip ✏ ✏ ✏ https://urlca.com/2uO8ee
-It is important to download and check the dummy admit card for any errors or discrepancies in the personal and academic details of the students.
-It is very important to download and check your dummy admit card as soon as it is available on the official website of BSEB. You should verify all the details such as your name, father's name, mother's name, date of birth, gender, category, photo, signature, subjects, exam centre, etc. If you find any mistake or mismatch in your dummy admit card, you should report it to your school administration immediately. You should also keep a copy of your dummy admit card for future reference.
- How to download the dummy admit card 10th 2023 Bihar Board online?
-The steps to download the dummy admit card are as follows:
-Visit the official website of BSEB - ssonline.biharboardonline.com or biharboardonline.com
-The dummy admit card for class 10 students will be available on the official website of BSEB from July 27 to August 4, 2022. You can visit either ssonline.biharboardonline.com or biharboardonline.com to access the link to download your dummy admit card.
-Click on the link to download the dummy admit card for class 10
-On the homepage of the website, you will find a link to download the dummy admit card for class 10. Click on it and you will be redirected to a new page where you will have to enter some details.
-How to download BSEB 10th dummy admit card 2023
-BSEB matric dummy admit card 2023 online
-Bihar board 10th exam dummy admit card 2023
-BSEB class 10 dummy admit card 2023 correction
-Bihar board secondary dummy admit card 2023
-BSEB 10th board exam dummy admit card 2023
-Bihar board matriculation dummy admit card 2023
-BSEB class X dummy admit card 2023 download link
-Bihar board 10th annual exam dummy admit card 2023
-BSEB matric exam dummy admit card 2023 pdf
-Bihar board secondary exam dummy admit card 2023
-BSEB class 10 exam dummy admit card 2023 steps
-Bihar board matric exam dummy admit card 2023 date
-BSEB secondary dummy admit card 2023 website
-Bihar board class 10 dummy admit card 2023 check
-BSEB matriculation dummy admit card 2023 release
-Bihar board class X dummy admit card 2023 download
-BSEB secondary exam dummy admit card 2023 online
-Bihar board matric annual exam dummy admit card 2023
-BSEB class X exam dummy admit card 2023 pdf
-Bihar board secondary annual exam dummy admit card 2023
-BSEB matric annual exam dummy admit card 2023 steps
-Bihar board class X annual exam dummy admit card 2023 date
-BSEB secondary annual exam dummy admit card 2023 website
-Bihar board matriculation annual exam dummy admit card 2023 check
-BSEB class X annual exam dummy admit card 2023 release
-Bihar board secondary examination dummy admit card 2023 download
-BSEB matric examination dummy admit card 2023 online
-Bihar board class X examination dummy admit card 2023 pdf
-BSEB secondary examination dummy admit card 2023 steps
-Bihar board matric examination dummy admit card 2023 date
-BSEB class X examination dummy admit card 2023 website
-Bihar board secondary examination dummy admit card 2023 check
-BSEB matric examination dummy admit card 2023 release
-Bihar board class X examination dummy admit card 2023 download link
-BSEB secondary examination dummy admit card correction online
-Bihar board matric examination dummy admit card correction steps
-BSEB class X examination dummy admit card correction date
-Bihar board secondary examination dummy admit card correction website
-BSEB matric examination dummy admit card correction check
-Bihar board class X examination dummy admit card correction release
-BSEB secondary examination dummy registration number online
-Bihar board matric examination dummy registration number steps
-BSEB class X examination dummy registration number date
-Bihar board secondary examination dummy registration number website
-BSEB matric examination dummy registration number check
-Bihar board class X examination dummy registration number release
-BSEB secondary examination dummy registration number download link
-Enter your school code, name, father's name, and date of birth and submit
-You will have to enter your school code, name, father's name, and date of birth as per your registration form. Make sure you enter them correctly and then click on the submit button.
-Your dummy admit card will be displayed on the screen
-After submitting your details, your dummy admit card will be displayed on the screen. You can check all the information and download it in PDF format.
-Download and take a printout of your dummy admit card
-You should download and take a printout of your dummy admit card and keep it safely. You should also check it for any errors or discrepancies and report them to your school administration as soon as possible.
- How to make corrections in the dummy admit card 10th 2023 Bihar Board?
-If you find any mistake in your dummy admit card, you should report it to your school administration as soon as possible.
-If you notice any error or discrepancy in your dummy admit card, such as spelling mistakes, wrong photo or signature, incorrect subjects or exam centre, etc., you should inform your school administration immediately. You should not ignore any mistake as it may affect your final admit card and exam results.
-The school administration can make the required changes in the dummy admit card till August 4, 2022.
-The school administration has the authority to make the necessary corrections in the dummy admit card till August 4, 2022. They can login to the BSEB portal using their user ID and password and make the changes online. They can also contact the BSEB helpline number - 0612-2232074 or email - bsebsehelpdesk@gmail.com for any assistance.
-The final admit card will be issued after the corrections are made and verified by the board.
-The final admit card for class 10 board exams will be issued by BSEB after the corrections are made and verified by the board. The final admit card will have all the correct and updated details of the students. The students should download their final admit card from the official website of BSEB before the exam date and carry it to the exam centre along with their school identity card.
- When will the final admit card and exam date sheet be released by BSEB?
-The final admit card for class 10 board exams will be released on January 8, 2023 on the official website of BSEB.
-The final admit card for class 10 board exams will be available on the official website of BSEB from January 8, 2023 onwards. The students can download their final admit card by entering their registration number or roll number and date of birth. The final admit card will have a barcode and a QR code for verification purposes. The students should check their final admit card carefully and report any discrepancy to their school administration or BSEB immediately.
-The exam date sheet for class 10 board exams has been released on December 9, 2022 on the official website of BSEB.
-The exam date sheet for class 10 board exams has been released by BSEB on December 9, 2022 on its official website. The exam date sheet contains the dates, timings, and subjects of the exams. The students can download the exam date sheet from the website and prepare accordingly. The exam date sheet is also given below in a table format for your convenience.
-
-
-Date |
-Shift |
-Subject |
-
-
-February 14, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Science |
-
-
-February 14, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Science |
-
-
-February 15, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Mathematics |
-
-
-February 15, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Mathematics |
-
-
-February 16, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Social Science |
-
-
-February 16, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Social Science |
-
-
-February 17, 2023 |
-Morning (9:30 am to 12:15 pm) |
-English (General) |
-
-
-February 17, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-English (General) |
-
-
-February 18, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Hindi/Urdu/Bangla/Maithili (General) |
-
-
-February 18, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Hindi/Urdu/Bangla/Maithili (General) |
-
- February 19, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Sanskrit/Arabic/Farsi/Bhojpuri (Optional) |
-
-
-February 19, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Sanskrit/Arabic/Farsi/Bhojpuri (Optional) |
-
-
-February 21, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Elective Subjects |
-
-
-February 21, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Elective Subjects |
-
-
-February 22, 2023 |
-Morning (9:30 am to 12:15 pm) |
-Additional Subjects |
-
-
-February 22, 2023 |
-Afternoon (1:45 pm to 4:30 pm) |
-Additional Subjects |
-
-
- What are the important instructions to follow while appearing for the board exams?
-The students should follow these instructions while appearing for the board exams:
-Carry your final admit card, school identity card, and permissible stationery items to the exam centre
-You should carry your final admit card, school identity card, and permissible stationery items such as pen, pencil, eraser, sharpener, ruler, etc. to the exam centre. You should not carry any other item that is not allowed by the board. You should also affix your photo and signature on your final admit card if not already done.
-Reach the exam centre before 10 am as no entry will be allowed after that
-You should reach the exam centre before 10 am as no entry will be allowed after that. You should also follow the seating arrangement and instructions given by the invigilators. You should not change your seat or exam centre without prior permission from the board.
-Wear your school uniform and follow the COVID-19 guidelines
-You should wear your school uniform and follow the COVID-19 guidelines such as wearing a mask, maintaining social distance, using sanitizer, etc. You should also undergo thermal screening and submit a self-declaration form at the entry point of the exam centre. You should not attend the exam if you are unwell or have any symptoms of COVID-19.
- Do not carry any electronic devices, mobile phones, or barred items to the exam centre
-You should not carry any electronic devices, mobile phones, or barred items such as calculator, watch, camera, bluetooth device, earphone, etc. to the exam centre. If you are found in possession of any such item, you will be disqualified from the exam and may face legal action.
- Read and adhere to the rules for unfair practices and avoid any malpractice
-You should read and adhere to the rules for unfair practices and avoid any malpractice such as copying, cheating, impersonation, etc. If you are caught indulging in any such activity, you will be expelled from the exam and may face legal action.
- Refrain from spreading rumours or hosting any material on social media related to the exams
-You should refrain from spreading rumours or hosting any material on social media related to the exams such as question papers, answers, tips, etc. If you are found doing so, you will be liable for legal action.
- Conclusion
- In this article, we have discussed how to download your dummy admit card 10th 2023 Bihar Board online, how to make corrections in it, when to expect your final admit card and exam date sheet, and what are the important instructions to follow while appearing for the board exams. We hope this article has been helpful and informative for you. We wish you all the best for your board exams!
- Frequently Asked Questions (FAQs)
- Q1. What is the official website of BSEB?
- A1. The official website of BSEB is biharboardonline.com. You can visit this website for all the latest updates and notifications regarding the board exams.
- Q2. What is the helpline number and email of BSEB?
- A2. The helpline number of BSEB is 0612-2232074 and the email is bse bsehelpdesk@gmail.com. You can contact them for any queries or issues related to the board exams.
- Q3. What are the elective and additional subjects for class 10 board exams?
- A3. The elective subjects for class 10 board exams are Music, Dance, Fine Arts, Home Science, Commerce, Computer Science, and Physical Education. The additional subjects are Hindi, English, Sanskrit, Urdu, Bangla, Maithili, Arabic, Farsi, and Bhojpuri. You can choose any one elective subject and any one additional subject as per your preference and availability.
- Q4. How can I prepare for the board exams?
- A4. You can prepare for the board exams by following these tips:
-
-- Revise the syllabus thoroughly and practice the previous year papers and sample papers.
-- Focus on the important topics and concepts and clear your doubts with your teachers or peers.
-- Make a study plan and follow it diligently. Allocate sufficient time for each subject and topic.
-- Take regular breaks and avoid stress and distractions. Eat healthy food and drink plenty of water.
-- Review your performance and work on your weak areas. Seek feedback and guidance from your mentors or tutors.
-
- Q5. How can I check my board exam results?
- A5. You can check your board exam results by visiting the official website of BSEB or by using the BSEB app on your mobile phone. You will have to enter your roll number or roll code and date of birth to access your results. You can also get your results via SMS by sending BSEB10 ROLLNUMBER to 56263.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/JoJos Bizarre Adventure in Bleach Vs Naruto How to Install the Mod (Android Only).md b/spaces/congsaPfin/Manga-OCR/logs/JoJos Bizarre Adventure in Bleach Vs Naruto How to Install the Mod (Android Only).md
deleted file mode 100644
index 3cbb64e97a3ba8974ce4c34eea0a1c50a5a1a448..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/JoJos Bizarre Adventure in Bleach Vs Naruto How to Install the Mod (Android Only).md
+++ /dev/null
@@ -1,163 +0,0 @@
-
-Bleach vs Naruto JoJo Mod Download: How to Play with Your Favorite Anime Characters
- Are you a fan of anime and fighting games? Do you love the epic battles and quirky characters of Bleach, Naruto, and JoJo's Bizarre Adventure? If you answered yes, then you are in for a treat. In this article, we will show you how to download and install Bleach vs Naruto JoJo Mod, a fan-made mod that combines these three popular anime series into one amazing game. You will also learn how to play the game and have fun with your favorite anime characters.
-bleach vs naruto jojo mod download
-Download ✅ https://urlca.com/2uOb25
- Introduction
- What is Bleach vs Naruto JoJo Mod?
- Bleach vs Naruto JoJo Mod is a mod-pack for Bleach vs Naruto, a free online 2D flash anime fighting game developed by the Chinese company 5Dplay. It is a crossover anime fighting game featuring characters from both Bleach and Naruto Shippuden with guest character Kenshin Himura from Rurouni Kenshin. The game is playable on internet browser, PC, and Android.
- The mod-pack adds characters, assists, and maps from JoJo's Bizarre Adventure, another popular anime series based on the manga by Hirohiko Araki. The series focuses on the mysterious adventures of the Joestar family across generations, from the end of the 19th century to modern times. The mod-pack has been created by different fans, such as MeleeWaluigi and diazynez, who have shared their work on YouTube and other platforms.
- Why should you try it?
- If you are looking for a fun and exciting way to enjoy your favorite anime series, then Bleach vs Naruto JoJo Mod is a great option. Here are some reasons why you should try it:
-
-- You can play with over 50 characters from Bleach, Naruto, and JoJo's Bizarre Adventure, each with their own unique abilities, moves, and transformations. You can also use assist characters to help you in battle.
-- You can explore over 20 maps inspired by the anime settings, such as Konoha Village, Hueco Mundo, Morioh Town, and more. You can also customize the background music and sound effects to suit your mood.
-- You can choose from different game modes, such as Arcade, VS Mode, Team Play, Survival, Training, Watch Mode, and more. You can also adjust the difficulty level, time limit, health bars, and other options to your preference.
-- You can play solo or with your friends online or offline. You can use keyboard or gamepad controls to control your character. You can also watch other players' matches or share your own gameplay videos online.
-- You can experience the thrilling and hilarious moments of the anime series in a new way. You can recreate iconic scenes, such as Ichigo vs Aizen, Naruto vs Sasuke, Jotaro vs Dio, and more. You can also discover new combinations and interactions between the characters, such as Naruto and Josuke, Rukia and Jolyne, Grimmjow and Kira, and more.
-
- As you can see, Bleach vs Naruto JoJo Mod is a game that offers a lot of fun and variety for anime fans. But how can you download and install it on your device? Let's find out in the next section.
-bleach vs naruto 3.3 jojo mod android
-bleach vs naruto jojo mod apk
-bleach vs naruto jojo mod pack
-bleach vs naruto jojo mod v2
-bleach vs naruto jojo mod characters
-bleach vs naruto jojo mod youtube
-bleach vs naruto jojo mod pc
-bleach vs naruto jojo mod online
-bleach vs naruto jojo mod free
-bleach vs naruto jojo mod gameplay
-bleach vs naruto jojo mod tutorial
-bleach vs naruto jojo mod update
-bleach vs naruto jojo mod review
-bleach vs naruto jojo mod link
-bleach vs naruto jojo mod how to install
-bleach vs naruto jojo mod 20+ characters
-bleach vs naruto jojo mod 186mb
-bleach vs naruto jojo mod diazynez
-bleach vs naruto jojo mod meleewaluigi
-bleach vs naruto jojo mod discord
-bleach vs naruto jojo mod ncs music
-bleach vs naruto jojo mod new interface
-bleach vs naruto jojo mod maps and assists
-bleach vs naruto jojo mod cw and 沃特风生水起
-bleach vs naruto jojo mod chinese community
-bleach vs naruto 3.3 with all characters unlocked and new ones added (including JoJo)
-how to download and play bleach vs naruto 3.3 on android with JoJo characters
-best JoJo characters in bleach vs naruto 3.3 (tier list)
-how to add more JoJo characters in bleach vs naruto 3.3 (modding guide)
-how to unlock all JoJo characters in bleach vs naruto 3.3 (cheat codes)
-how to fix bugs and glitches in bleach vs naruto 3.3 JoJo mod (troubleshooting tips)
-how to change the language and settings in bleach vs naruto 3.3 JoJo mod (options menu)
-how to play multiplayer mode in bleach vs naruto 3.3 JoJo mod (online or local)
-how to use special moves and combos in bleach vs naruto 3.3 JoJo mod (controls and commands)
-how to customize your character and team in bleach vs naruto 3.3 JoJo mod (selection screen)
-what are the differences between the original and the JoJo version of bleach vs naruto 3.3 (comparison and analysis)
-what are the advantages and disadvantages of using JoJo characters in bleach vs naruto 3.3 (pros and cons)
-what are the best strategies and tips for winning in bleach vs naruto 3.3 JoJo mod (gameplay advice)
-what are the most popular and requested JoJo characters for bleach vs naruto 3.3 (fan feedback and suggestions)
-what are the future plans and updates for bleach vs naruto 3.3 JoJo mod (development news and announcements)
- How to download and install Bleach vs Naruto JoJo Mod
- Downloading and installing Bleach vs Naruto JoJo Mod is not very difficult, but it requires some steps depending on the device you are using. Here are the instructions for PC and Android users:
- For PC users
- If you want to play Bleach vs Naruto JoJo Mod on your PC, you will need to follow these steps:
- Step 1: Download the mod file from the link provided
- The first thing you need to do is to download the mod file from the link provided by the mod creator. For example, you can use this link to download the latest version of the mod by MeleeWaluigi. The file size is about 1.5 GB, so make sure you have enough space on your hard drive.
- Step 2: Extract the zip file to a folder of your choice
- After downloading the mod file, you will need to extract it using a program like WinRAR or 7-Zip. You can extract it to any folder you want, but make sure you remember where you put it. You should see a folder named Bleach vs Naruto JoJo Mod with several files inside.
- Step 3: Run the game executable file and enjoy
- The last step is to run the game executable file, which is named Bvn.exe. You can double-click on it or right-click and select Run as administrator. The game should start and you should see the main menu. You can now choose your game mode, character, map, and other options. Have fun!
- For Android users
- If you want to play Bleach vs Naruto JoJo Mod on your Android device, you will need to follow these steps:
- Step 1: Download the mod file from the link provided
- The first thing you need to do is to download the mod file from the link provided by the mod creator. For example, you can use this link to download the latest version of the mod by diazynez. The file size is about 1 GB, so make sure you have enough space on your device.
- Step 2: Install the APK file on your device
- After downloading the mod file, you will need to install it on your device. You can do this by tapping on the file or using a file manager app. You may need to enable Unknown sources in your device settings to allow installation of apps from outside the Google Play Store. You should see a screen asking you to confirm the installation. Tap on Install and wait for it to finish.
- Step 3: Run the game app and enjoy
- The last step is to run the game app, which is named Bleach vs Naruto JoJo Mod.apk. You can find it in your app drawer or home screen. The game should start and you should see the main menu. You can now choose your game mode, character, map, and other options. Have fun!
- How to play Bleach vs Naruto JoJo Mod
- Now that you have downloaded and installed Bleach vs Naruto JoJo Mod, you may be wondering how to play it. In this section, we will give you some basic information about the gameplay and controls, the characters and maps, and some tips and tricks.
- The gameplay and controls
- Bleach vs Naruto JoJo Mod is a 2D fighting game that follows the same gameplay as Bleach vs Naruto. You can choose from different game modes, such as Arcade, VS Mode, Team Play, Survival, Training, Watch Mode, and more. You can also adjust the difficulty level, time limit, health bars, and other options to your preference.
- The game supports both keyboard and gamepad controls. You can also customize your own controls in the options menu. Here are the default keyboard controls for PC users:
-
-
-Key |
-Function |
-
-
-WASD |
-Move your character |
-
-
-J |
-Attack |
-
-
-K |
-Jump |
-
-
-L |
-Dodge |
-
-
-U |
-Special attack 1 |
-
-
-I |
-Special attack 2 |
-
-
-O |
-Special attack 3 |
-
-
-P |
-Assist character |
-
-
-S+J, W+J, S+U, W+U, etc. |
-Combo attacks and transformations (depending on the character) |
-
- You can also use the arrow keys and the number keys (1-6) to control your character. The gamepad controls are similar, but you can check them in the options menu.
- The game supports up to four players in one match, either online or offline. You can also watch other players' matches or share your own gameplay videos online.
- The characters and maps
- Bleach vs Naruto JoJo Mod features over 50 characters from Bleach, Naruto, and JoJo's Bizarre Adventure, each with their own unique abilities, moves, and transformations. You can also use assist characters to help you in battle. Here are some examples of the characters you can play with:
-
-- Bleach: Ichigo Kurosaki, Rukia Kuchiki, Orihime Inoue, Uryu Ishida, Renji Abarai, Byakuya Kuchiki, Kenpachi Zaraki, Toshiro Hitsugaya, Grimmjow Jaegerjaquez, Ulquiorra Cifer, Sosuke Aizen, and more.
-- Naruto: Naruto Uzumaki, Sasuke Uchiha, Sakura Haruno, Kakashi Hatake, Gaara, Rock Lee, Neji Hyuga, Hinata Hyuga, Itachi Uchiha, Madara Uchiha, Obito Uchiha, and more.
-- JoJo's Bizarre Adventure: Jonathan Joestar, Joseph Joestar, Jotaro Kujo, Josuke Higashikata, Giorno Giovanna, Jolyne Cujoh, Johnny Joestar, Dio Brando, Kars, Yoshikage Kira, Diavolo, and more.
-- Guest: Kenshin Himura from Rurouni Kenshin.
- You can explore over 20 maps inspired by the anime settings, such as Konoha Village, Hueco Mundo, Morioh Town, and more. You can also customize the background music and sound effects to suit your mood.
- The tips and tricks
- If you want to improve your skills and have more fun with Bleach vs Naruto JoJo Mod, here are some tips and tricks you can use:
-
-- Learn the strengths and weaknesses of each character. Some characters are faster, stronger, or more durable than others. Some characters have ranged attacks, area attacks, or special abilities that can give them an edge in certain situations. Try to use the character that suits your playstyle and strategy.
-- Use the dodge and block buttons wisely. Dodging and blocking can help you avoid taking damage from your opponent's attacks. However, they also consume your energy bar, which you need to use your special attacks. Therefore, you should balance your defense and offense and not spam these buttons.
-- Use your assist character at the right time. Your assist character can help you in various ways, such as attacking, defending, healing, or buffing you. However, you can only use them once per match, so you should save them for when you really need them. For example, you can use them to finish off a low-health opponent, to escape from a tight spot, or to turn the tide of the battle.
-- Experiment with different combos and transformations. Each character has different combo attacks and transformations that can enhance their performance and deal more damage. You can perform these by pressing certain key combinations, such as S+J, W+J, S+U, W+U, etc. You can also check the character's moves list in the pause menu. Try to learn and master these moves to unleash your full potential.
-- Have fun and enjoy the game. The most important tip is to have fun and enjoy the game. Bleach vs Naruto JoJo Mod is a game that is meant to entertain and amuse anime fans. You can play it casually or competitively, alone or with friends, online or offline. You can also watch other players' matches or share your own gameplay videos online. The game is constantly updated with new characters, maps, and features, so you can always expect something new and exciting.
-
- Conclusion
- In conclusion, Bleach vs Naruto JoJo Mod is a fan-made mod that combines three popular anime series into one amazing game. You can download and install it on your PC or Android device and play with over 50 characters from Bleach, Naruto, and JoJo's Bizarre Adventure. You can also explore over 20 maps inspired by the anime settings and choose from different game modes and options. You can also learn how to play the game and use some tips and tricks to improve your skills and have more fun.
- If you are a fan of anime and fighting games, then you should definitely try Bleach vs Naruto JoJo Mod. It is a game that will keep you entertained and amused for hours. You can also share your experience with other anime fans online or offline. So what are you waiting for? Download the mod now and enjoy!
- FAQs
- Here are some frequently asked questions about Bleach vs Naruto JoJo Mod:
-
-- Is Bleach vs Naruto JoJo Mod free?
-Yes, Bleach vs Naruto JoJo Mod is free to download and play. However, you may need to support the mod creators by watching their ads or donating to their channels.
-- Is Bleach vs Naruto JoJo Mod safe?
-Yes, Bleach vs Naruto JoJo Mod is safe to download and install on your device. However, you should always scan the mod file with an antivirus program before opening it. You should also only download the mod from trusted sources, such as the links provided by the mod creators.
-- Is Bleach vs Naruto JoJo Mod legal?
-Bleach vs Naruto JoJo Mod is a fan-made mod that uses assets from Bleach, Naruto, and JoJo's Bizarre Adventure, which are owned by their respective creators and publishers. The mod does not intend to infringe any copyrights or trademarks of these series. The mod is made for entertainment purposes only and does not generate any profit. However, the mod may be subject to removal or legal action by the original owners if they deem it inappropriate or harmful to their interests.
-- How can I support the mod creators?
-If you enjoy Bleach vs Naruto JoJo Mod and want to support the mod creators, you can do so by watching their ads, subscribing to their channels, liking and commenting on their videos, sharing their links, or donating to their accounts. You can also give them feedback and suggestions on how to improve the mod or add new features. You can find their contact information on their websites or social media platforms.
-- How can I report bugs or errors in the mod?
-If you encounter any bugs or errors in the mod, such as crashes, glitches, missing textures, broken sounds, etc., you can report them to the mod creators by sending them a message or leaving a comment on their videos. You should also provide them with details about the bug or error, such as when and where it happened, what device and version you are using, what characters and maps you are playing with, etc. You should also attach a screenshot or a video of the bug or error if possible.
-- How can I request new characters or maps for the mod?
-If you have any ideas or requests for new characters or maps for the mod, you can share them with the mod creators by sending them a message or leaving a comment on their videos. You should also provide them with reasons why you want them to add your requested character or map, such as how they fit the theme of the mod, what abilities or moves they have, what anime series they are from, etc. You should also attach a picture or a link of your requested character or map if possible.
-
- I hope this article has answered your questions about Bleach vs Naruto JoJo Mod. If you have any more questions, feel free to ask me in the chat box below. Thank you for reading and have a nice day!
-197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Kipas Roblox APK A Guide to the Most Popular and Fun Experiences on Roblox for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Kipas Roblox APK A Guide to the Most Popular and Fun Experiences on Roblox for Android.md
deleted file mode 100644
index e5d68ae19a17e96f778d7d73ea6c1a9353a38b77..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Kipas Roblox APK A Guide to the Most Popular and Fun Experiences on Roblox for Android.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-Kipas Roblox APK: What Is It and How to Download It
-Roblox is one of the most popular online gaming platforms in the world, with millions of players creating and exploring an infinite variety of immersive experiences. However, not everyone can afford to buy Robux, the in-game currency that allows you to purchase premium items, accessories, and game passes. That's why some people resort to using unofficial apps or hacks that promise to give them free Robux or other benefits in Roblox. One of these apps is called Kipas Roblox APK.
-In this article, we will explain what Kipas Roblox APK is, how it works, and how you can download it. We will also discuss the pros and cons of using this app, as well as some alternatives that you might want to consider. By the end of this article, you will have a better understanding of Kipas Roblox APK and whether it is worth trying or not.
-kipas roblox apk
-Download Zip ✸✸✸ https://urlca.com/2uO7VV
- What Is Kipas Roblox APK?
-Definition
-Kipas Roblox APK is an Android application that claims to offer free Robux and other advantages in Roblox. The name "Kipas" comes from the Indonesian word for "fan", which implies that this app is made by or for fans of Roblox. The app is not available on the official Google Play Store, but can be downloaded from third-party websites or sources.
-Benefits
-The main benefit of using Kipas Roblox APK is that it supposedly gives you unlimited Robux without spending any real money. This means that you can buy any item, accessory, or game pass that you want in Roblox without any limitations. You can also use the app to access premium features that are normally reserved for paid members, such as creating groups, joining exclusive games, and more.
-Risks
-However, using Kipas Roblox APK also comes with some serious risks that you should be aware of before downloading it. First of all, the app is not authorized or endorsed by Roblox Corporation, which means that it violates their terms of service and could result in your account being suspended or banned. Secondly, the app could contain malware or viruses that could harm your device or steal your personal information. Thirdly, the app could expose you to legal issues if you are caught using it or distributing it to others.
- How to Download Kipas Roblox APK?
-Requirements
-If you still want to try Kipas Roblox APK despite the risks involved, you will need to meet some minimum requirements before downloading and installing it. These include:
-
-- An Android device running on version 5.0 or higher
-- At least 100 MB of free storage space on your device
-- A stable internet connection
-- A valid Roblox account
-
-Steps
-Once you have met the requirements, you can follow these steps to download and install Kipas Roblox APK:
-
-- Go to a reliable website that offers Kipas Roblox APK, such as kipasroblox.com or apkfab.com.
-- Click on the download button and wait for the file to be downloaded on your device.
-- Go to your device settings and enable the option to install apps from unknown sources.
-- Locate the downloaded file and tap on it to start the installation process.
-- Follow the instructions on the screen and grant the necessary permissions to the app.
-- Launch the app and log in with your Roblox account.
-- Enjoy your free Robux and other benefits in Roblox.
-
-Tips
-To use Kipas Roblox APK safely and effectively, here are some tips that you should keep in mind:
-kipas roblox apk download
-kipas roblox apk mod
-kipas roblox apk latest version
-kipas roblox apk 2023
-kipas roblox apk free
-kipas roblox apk android
-kipas roblox apk update
-kipas roblox apk hack
-kipas roblox apk unlimited robux
-kipas roblox apk offline
-kipas roblox apk for pc
-kipas roblox apk online
-kipas roblox apk no verification
-kipas roblox apk old version
-kipas roblox apk new version
-kipas roblox apk premium
-kipas roblox apk pro
-kipas roblox apk full version
-kipas roblox apk cracked
-kipas roblox apk cheat
-kipas roblox apk generator
-kipas roblox apk installer
-kipas roblox apk file
-kipas roblox apk obb
-kipas roblox apk data
-kipas roblox apk review
-kipas roblox apk gameplay
-kipas roblox apk features
-kipas roblox apk tips
-kipas roblox apk tricks
-kipas roblox apk guide
-kipas roblox apk tutorial
-kipas roblox apk wiki
-kipas roblox apk reddit
-kipas roblox apk forum
-kipas roblox apk discord
-kipas roblox apk youtube
-kipas roblox apk facebook
-kipas roblox apk twitter
-kipas roblox apk instagram
-kipas roblox apk tiktok
-kipas roblox apk pinterest
-kipas roblox apk quora
-kipas roblox apk medium
-kipas roblox apk blogspot
-kipas roblox apk wordpress
-kipas roblox apk wixsite
-kipas roblox apk weebly
-kipas roblox apk squarespace
-
-- Back up your data before using the app, in case something goes wrong or you lose your account.
-- Use a VPN to hide your IP address and location, to avoid being detected or tracked by Roblox or other authorities.
-- Avoid clicking on suspicious links or ads that might redirect you to malicious websites or download unwanted software.
-- Do not share your account details or personal information with anyone, especially strangers or people who claim to be from Roblox or Kipas Roblox APK.
-- Do not abuse the app or use it too frequently, as this might raise suspicion or trigger anti-cheat mechanisms in Roblox.
-
- Alternatives to Kipas Roblox APK
-Comparison
-Kipas Roblox APK is not the only app or method that claims to offer free Robux or other advantages in Roblox. There are many other options that you might have heard of or tried, such as:
-
-Name | Description | Pros | Cons |
-Robux Generator | A website or tool that generates free Robux for your account. | No download or installation required. Easy to use. Instant results. | Most likely fake or scam. Could steal your account or personal information. Could infect your device with malware or viruses. Could get you banned from Roblox. |
-Robux Hack | A software or script that modifies the game code or data to give you free Robux or other benefits. | No need to spend real money. Could unlock premium features or items. Could give you an edge over other players. | Illegal and unethical. Could damage your device or game files. Could expose you to legal issues. Could get you banned from Roblox. |
-Robux Survey | A website or app that rewards you with free Robux for completing surveys, tasks, offers, or watching videos. | No need to download or install anything. No risk of malware or viruses. No risk of account suspension or ban. | Time-consuming and tedious. Low payout rate. Could require personal information or payment details. Could spam you with ads or emails. |
-Robux Gift Card | A physical or digital card that contains a code that can be redeemed for a certain amount of Robux. | No need to use any app or hack. Legitimate and safe. Can be bought online or offline. Can be gifted to others. | Costs real money. Limited availability. Could expire or be invalid. Could be stolen or lost. |
-
- Recommendation
-Based on our comparison, we recommend that you avoid using Kipas Roblox APK or any other app or hack that claims to give you free Robux or other benefits in Roblox. These methods are not only risky, but also unfair and dishonest. They could ruin your gaming experience and reputation, as well as harm your device and data.
-The best alternative to Kipas Roblox APK is to use a legitimate and safe method to earn Robux, such as buying a Robux gift card, completing a Robux survey, or creating and selling your own items or games in Roblox. These methods are not only legal and ethical, but also rewarding and fun. They could enhance your gaming experience and reputation, as well as help you support the developers and creators of Roblox.
- Conclusion
-Kipas Roblox APK is an Android application that claims to offer free Robux and other advantages in Roblox. However, it is not a safe or reliable method to use, as it could expose you to various risks, such as malware, account suspension, or legal issues. Moreover, it is not a fair or honest way to play Roblox, as it violates the terms of service and the spirit of the game.
-The best way to enjoy Roblox is to use a legitimate and safe method to earn Robux, such as buying a Robux gift card, completing a Robux survey, or creating and selling your own items or games in Roblox. These methods are not only legal and ethical, but also rewarding and fun. They could enhance your gaming experience and reputation, as well as help you support the developers and creators of Roblox.
-We hope that this article has helped you understand what Kipas Roblox APK is and how to download it. We also hope that you have learned why you should avoid using it or any other app or hack that claims to give you free Robux or other benefits in Roblox. If you have any questions, comments, or feedback, please feel free to share them with us below.
- FAQs
-Q: Is Kipas Roblox APK safe?
-A: No, Kipas Roblox APK is not safe. It could contain malware or viruses that could harm your device or steal your personal information. It could also get you banned from Roblox or expose you to legal issues.
-Q: Is Kipas Roblox APK legal?
-A: No, Kipas Roblox APK is not legal. It violates the terms of service of Roblox and the intellectual property rights of the developers and creators of Roblox. It could also infringe on the laws of your country or region.
-Q: Is Kipas Roblox APK ethical?
-A: No, Kipas Roblox APK is not ethical. It is unfair and dishonest to use an app or hack that gives you an unfair advantage over other players or cheats the system. It also undermines the hard work and creativity of the developers and creators of Roblox.
-Q: How can I get free Robux without using Kipas Roblox APK?
-A: You can get free Robux without using Kipas Roblox APK by using a legitimate and safe method, such as buying a Robux gift card, completing a Robux survey, or creating and selling your own items or games in Roblox.
-Q: Where can I find more information about Kipas Roblox APK?
-A: You can find more information about Kipas Roblox APK by visiting the official website of the app, kipasroblox.com, or by reading reviews and testimonials from other users who have tried it.
-197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Whats New in Free Fire MAX 7.0 APK? Find Out the Latest Features and Improvements.md b/spaces/congsaPfin/Manga-OCR/logs/Whats New in Free Fire MAX 7.0 APK? Find Out the Latest Features and Improvements.md
deleted file mode 100644
index 8d65bd5babafbd3bcd41de59f2b047bcc3275c20..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Whats New in Free Fire MAX 7.0 APK? Find Out the Latest Features and Improvements.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Free Fire MAX 7.0 APK: Everything You Need to Know
-If you are a fan of battle royale games, you might have heard of Free Fire MAX, the enhanced version of the popular Free Fire game. Free Fire MAX is designed to deliver a premium gameplay experience with ultra HD graphics, exclusive features, and various game modes. In this article, we will tell you everything you need to know about Free Fire MAX 7.0 APK, the latest update of the game that brings new characters, weapons, vehicles, events, and rewards. Read on to find out how to download and install Free Fire MAX 7.0 APK on your Android device and enjoy the ultimate battle royale experience.
- What is Free Fire MAX?
-Free Fire MAX is a battle royale game developed by Garena International, the same company that created Free Fire. Free Fire MAX is a separate app from Free Fire, but it allows you to play with all Free Fire players via the exclusive Firelink technology. You can also login with your existing Free Fire account and access all your progress and items across both apps in real-time.
-free fire max 7.0 apk
Download Zip ✏ https://urlca.com/2uO6qp
-Free Fire MAX offers a variety of exciting game modes, such as classic, ranked, clash squad, rush hour, and more. You can also explore different maps, such as Bermuda, Kalahari, Purgatory, and more. The game supports up to 50 players in a match, where you have to parachute onto a deserted island, scavenge for weapons and supplies, and fight for survival. The last one standing wins the game.
- Features of Free Fire MAX
-Free Fire MAX has many features that make it stand out from other battle royale games. Here are some of them:
- Ultra HD graphics and effects
-Free Fire MAX boasts ultra HD graphics and effects that create a realistic and immersive survival experience. You can enjoy stunning visuals, such as dynamic lighting, shadows, reflections, textures, and more. You can also customize the graphics settings according to your device's performance.
- Exclusive Firelink technology
-Free Fire MAX uses the exclusive Firelink technology that allows you to play with all Free Fire players regardless of which app they use. You can also login with your existing Free Fire account and access all your progress and items across both apps in real-time. This means you don't have to worry about losing your data or starting from scratch.
- Various game modes and maps
-Free Fire MAX offers a variety of exciting game modes that cater to different preferences and play styles. You can choose from classic, ranked, clash squad, rush hour, and more. You can also explore different maps, such as Bermuda, Kalahari, Purgatory, and more. Each map has its own features and challenges that require different strategies and skills.
- How to download and install Free Fire MAX 7.0 APK?
-If you want to download and install Free Fire MAX 7.0 APK on your Android device, you need to follow these steps:
- Requirements and compatibility
-Before you download and install Free Fire MAX 7.0 APK, you need to make sure that your device meets the following requirements:
-Free Fire Max 7.0 APK is a modified version of the popular battle royale game Free Fire, which offers enhanced graphics, sound effects, and gameplay features.
-Free Fire Max 7.0 APK is not officially released by Garena, the developer of Free Fire, and may contain malware or viruses that can harm your device or compromise your data.
-Free Fire Max 7.0 APK is not compatible with the original Free Fire game, and may cause errors or crashes if you try to play with other players who are using the official version.
-Free Fire Max 7.0 APK may violate the terms of service of Garena and result in your account being banned or suspended if you are caught using it.
-
-- Your device must have Android version 4.1 or higher.
-- Your device must have at least 2 GB of RAM.
-- Your device must have at least 1 GB of free storage space.
-- Your device must have a stable internet connection.
-
-You also need to check if your device is compatible with Free Fire MAX by visiting this link: [^1
Steps to download and install
-After you have verified that your device meets the requirements and is compatible with Free Fire MAX, you can proceed to download and install Free Fire MAX 7.0 APK by following these steps:
-
-- Go to this link: (https://free-fire-max.en.uptodown.com/android/download) and click on the "Download" button to download the Free Fire MAX 7.0 APK file.
-- Once the download is complete, locate the APK file in your device's file manager and tap on it to install it. You may need to enable the "Unknown sources" option in your device's settings to allow the installation of apps from third-party sources.
-- After the installation is done, launch the Free Fire MAX app and login with your existing Free Fire account or create a new one.
-- Enjoy playing Free Fire MAX 7.0 on your Android device.
-
- Tips and tricks to optimize performance
-If you want to enjoy the best performance of Free Fire MAX 7.0 on your Android device, you can follow these tips and tricks:
-
-- Close any background apps that are not in use to free up RAM and CPU resources.
-- Adjust the graphics settings according to your device's capabilities. You can lower the resolution, frame rate, and effects to improve the smoothness and reduce lag.
-- Use a reliable and fast internet connection to avoid network issues and latency.
-- Clear the cache and data of the Free Fire MAX app regularly to avoid glitches and errors.
-- Update the Free Fire MAX app whenever a new version is available to enjoy the latest features and bug fixes.
-
- What's new in Free Fire MAX 7.0 APK?
-Free Fire MAX 7.0 APK is the latest update of the game that brings many new features and improvements. Here are some of them:
- New characters and pets
-Free Fire MAX 7.0 APK introduces two new characters and two new pets that you can unlock and use in the game. The new characters are:
-
-- Kelly "The Swift": A sprinter who can run faster than anyone else. Her passive skill increases her movement speed by 1% for every level up, up to a maximum of 6%.
-- Miguel "The Captain": A former special forces captain who can survive any situation. His passive skill allows him to gain 40 EP for every kill, up to a maximum of 200 EP.
-
-The new pets are:
-
-- Panda: A cute and cuddly companion who can heal you when you are injured. His skill restores 4 HP for every kill, up to a maximum of 10 HP.
-- Falco: A majestic bird who can boost your gliding speed when you parachute onto the island. His skill increases your gliding speed by 15% and your diving speed by 25% for 45 seconds after skydiving.
-
- New weapons and vehicles
-Free Fire MAX 7.0 APK also adds two new weapons and two new vehicles that you can find and use in the game. The new weapons are:
-
-- M82B: A powerful sniper rifle that can penetrate through walls and deal massive damage. It has a magazine capacity of 8 bullets and a reload time of 3 seconds.
-- Kord: A heavy machine gun that can fire in two modes: normal mode and deploy mode. In normal mode, it has a high rate of fire but low accuracy. In deploy mode, it has a lower rate of fire but higher accuracy and damage.
-
-The new vehicles are:
-
-- Sports Car: A fast and stylish car that can seat up to four players. It has a high speed but low durability.
-- Moto Glider: A unique vehicle that can fly in the air using a glider attached to a motorcycle. It has a low speed but high mobility.
-
- New events and rewards
-Free Fire MAX 7.0 APK also brings many new events and rewards that you can participate in and claim in the game. Some of them are:
-
-- Login Event: Login for seven consecutive days during the event period and get various rewards, such as gold, diamonds, vouchers, crates, skins, and more.
-- Lucky Draw
-- Lucky Draw: Spend diamonds to draw and get a chance to win exclusive items, such as the M82B Dragon's Breath skin, the Kord Frost Sabertooth skin, the Panda Gentleman skin, and more.
-- Top Up Event: Top up a certain amount of diamonds during the event period and get bonus rewards, such as the Sports Car Racer skin, the Moto Glider Racer skin, the Falco Skyline skin, and more.
-
- Conclusion
-Free Fire MAX 7.0 APK is the latest update of the game that offers a premium gameplay experience with ultra HD graphics, exclusive features, and various game modes. You can download and install Free Fire MAX 7.0 APK on your Android device by following the steps in this article. You can also enjoy the new characters, weapons, vehicles, events, and rewards that are added in this update. Free Fire MAX 7.0 APK is a must-try for all battle royale fans who want to experience the ultimate survival challenge.
- FAQs
-Here are some frequently asked questions about Free Fire MAX 7.0 APK:
-
-- Is Free Fire MAX 7.0 APK safe to download and install?
-Yes, Free Fire MAX 7.0 APK is safe to download and install as long as you use a trusted source, such as the link provided in this article. You should avoid downloading Free Fire MAX 7.0 APK from unknown or suspicious websites that may contain malware or viruses.
-- Can I play Free Fire MAX 7.0 APK on PC?
-Yes, you can play Free Fire MAX 7.0 APK on PC by using an Android emulator, such as BlueStacks, NoxPlayer, or LDPlayer. You can download and install any of these emulators on your PC and then follow the same steps as you would on your Android device to download and install Free Fire MAX 7.0 APK.
-- Can I play Free Fire MAX 7.0 APK with my friends who use Free Fire?
-Yes, you can play Free Fire MAX 7.0 APK with your friends who use Free Fire thanks to the exclusive Firelink technology that allows cross-play between both apps. You can also login with your existing Free Fire account and access all your progress and items across both apps in real-time.
-- How can I update Free Fire MAX 7.0 APK to the latest version?
-You can update Free Fire MAX 7.0 APK to the latest version by following these steps:
-
-- Launch the Free Fire MAX app and go to the main menu.
-- Tap on the "Settings" icon on the top right corner of the screen.
-- Tap on the "Update" button on the bottom of the screen.
-- Follow the instructions on the screen to download and install the latest version of Free Fire MAX 7.0 APK.
-
-- What are some alternatives to Free Fire MAX 7.0 APK?
-If you are looking for some alternatives to Free Fire MAX 7.0 APK, you can try these games:
-
-- PUBG Mobile: A popular battle royale game that features realistic graphics, diverse weapons, vehicles, and maps, and various game modes.
-- Call of Duty: Mobile: A popular first-person shooter game that features iconic characters, weapons, maps, and modes from the Call of Duty franchise.
-- Fortnite: A popular battle royale game that features cartoonish graphics, creative building mechanics, unique weapons and items, and various game modes.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Autocom Delphi Keygen 2013 R2 23.md b/spaces/contluForse/HuggingGPT/assets/Autocom Delphi Keygen 2013 R2 23.md
deleted file mode 100644
index 552800a4b12ab0e3d559494388fb5d15f5431095..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Autocom Delphi Keygen 2013 R2 23.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Autocom Delphi Keygen 2013 R2 23
Download ➡ https://ssurll.com/2uzvQ0
-
-R3 How to install · Autocom & Delphi 2013 R1 Activator patch Works ... Patch · How to install Autocom Delphi R2.2013 + Key activation full ... Joined: Apr 23, 2013. Messages: 12. Likes: 0. Apr 23, 2013 · #3. Gallons: 1,720. Apr 23, 2013 · #3. thanks Keygen for Autocom/delphi 2013.1 Do not have an image. 1fdad05405
-
-
-
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/pidinet/model.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/pidinet/model.py
deleted file mode 100644
index d7dc6b8eccd945640e1aa8c35916df79a2fa69f9..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/pidinet/model.py
+++ /dev/null
@@ -1,654 +0,0 @@
-"""
-Author: Zhuo Su, Wenzhe Liu
-Date: Feb 18, 2021
-"""
-
-import math
-
-import cv2
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from basicsr.utils import img2tensor
-
-nets = {
- 'baseline': {
- 'layer0': 'cv',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'cv',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'cv',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'cv',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'c-v15': {
- 'layer0': 'cd',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'cv',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'cv',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'cv',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'a-v15': {
- 'layer0': 'ad',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'cv',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'cv',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'cv',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'r-v15': {
- 'layer0': 'rd',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'cv',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'cv',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'cv',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'cvvv4': {
- 'layer0': 'cd',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'cd',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'cd',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'cd',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'avvv4': {
- 'layer0': 'ad',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'ad',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'ad',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'ad',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'rvvv4': {
- 'layer0': 'rd',
- 'layer1': 'cv',
- 'layer2': 'cv',
- 'layer3': 'cv',
- 'layer4': 'rd',
- 'layer5': 'cv',
- 'layer6': 'cv',
- 'layer7': 'cv',
- 'layer8': 'rd',
- 'layer9': 'cv',
- 'layer10': 'cv',
- 'layer11': 'cv',
- 'layer12': 'rd',
- 'layer13': 'cv',
- 'layer14': 'cv',
- 'layer15': 'cv',
- },
- 'cccv4': {
- 'layer0': 'cd',
- 'layer1': 'cd',
- 'layer2': 'cd',
- 'layer3': 'cv',
- 'layer4': 'cd',
- 'layer5': 'cd',
- 'layer6': 'cd',
- 'layer7': 'cv',
- 'layer8': 'cd',
- 'layer9': 'cd',
- 'layer10': 'cd',
- 'layer11': 'cv',
- 'layer12': 'cd',
- 'layer13': 'cd',
- 'layer14': 'cd',
- 'layer15': 'cv',
- },
- 'aaav4': {
- 'layer0': 'ad',
- 'layer1': 'ad',
- 'layer2': 'ad',
- 'layer3': 'cv',
- 'layer4': 'ad',
- 'layer5': 'ad',
- 'layer6': 'ad',
- 'layer7': 'cv',
- 'layer8': 'ad',
- 'layer9': 'ad',
- 'layer10': 'ad',
- 'layer11': 'cv',
- 'layer12': 'ad',
- 'layer13': 'ad',
- 'layer14': 'ad',
- 'layer15': 'cv',
- },
- 'rrrv4': {
- 'layer0': 'rd',
- 'layer1': 'rd',
- 'layer2': 'rd',
- 'layer3': 'cv',
- 'layer4': 'rd',
- 'layer5': 'rd',
- 'layer6': 'rd',
- 'layer7': 'cv',
- 'layer8': 'rd',
- 'layer9': 'rd',
- 'layer10': 'rd',
- 'layer11': 'cv',
- 'layer12': 'rd',
- 'layer13': 'rd',
- 'layer14': 'rd',
- 'layer15': 'cv',
- },
- 'c16': {
- 'layer0': 'cd',
- 'layer1': 'cd',
- 'layer2': 'cd',
- 'layer3': 'cd',
- 'layer4': 'cd',
- 'layer5': 'cd',
- 'layer6': 'cd',
- 'layer7': 'cd',
- 'layer8': 'cd',
- 'layer9': 'cd',
- 'layer10': 'cd',
- 'layer11': 'cd',
- 'layer12': 'cd',
- 'layer13': 'cd',
- 'layer14': 'cd',
- 'layer15': 'cd',
- },
- 'a16': {
- 'layer0': 'ad',
- 'layer1': 'ad',
- 'layer2': 'ad',
- 'layer3': 'ad',
- 'layer4': 'ad',
- 'layer5': 'ad',
- 'layer6': 'ad',
- 'layer7': 'ad',
- 'layer8': 'ad',
- 'layer9': 'ad',
- 'layer10': 'ad',
- 'layer11': 'ad',
- 'layer12': 'ad',
- 'layer13': 'ad',
- 'layer14': 'ad',
- 'layer15': 'ad',
- },
- 'r16': {
- 'layer0': 'rd',
- 'layer1': 'rd',
- 'layer2': 'rd',
- 'layer3': 'rd',
- 'layer4': 'rd',
- 'layer5': 'rd',
- 'layer6': 'rd',
- 'layer7': 'rd',
- 'layer8': 'rd',
- 'layer9': 'rd',
- 'layer10': 'rd',
- 'layer11': 'rd',
- 'layer12': 'rd',
- 'layer13': 'rd',
- 'layer14': 'rd',
- 'layer15': 'rd',
- },
- 'carv4': {
- 'layer0': 'cd',
- 'layer1': 'ad',
- 'layer2': 'rd',
- 'layer3': 'cv',
- 'layer4': 'cd',
- 'layer5': 'ad',
- 'layer6': 'rd',
- 'layer7': 'cv',
- 'layer8': 'cd',
- 'layer9': 'ad',
- 'layer10': 'rd',
- 'layer11': 'cv',
- 'layer12': 'cd',
- 'layer13': 'ad',
- 'layer14': 'rd',
- 'layer15': 'cv',
- },
- }
-
-def createConvFunc(op_type):
- assert op_type in ['cv', 'cd', 'ad', 'rd'], 'unknown op type: %s' % str(op_type)
- if op_type == 'cv':
- return F.conv2d
-
- if op_type == 'cd':
- def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1):
- assert dilation in [1, 2], 'dilation for cd_conv should be in 1 or 2'
- assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for cd_conv should be 3x3'
- assert padding == dilation, 'padding for cd_conv set wrong'
-
- weights_c = weights.sum(dim=[2, 3], keepdim=True)
- yc = F.conv2d(x, weights_c, stride=stride, padding=0, groups=groups)
- y = F.conv2d(x, weights, bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
- return y - yc
- return func
- elif op_type == 'ad':
- def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1):
- assert dilation in [1, 2], 'dilation for ad_conv should be in 1 or 2'
- assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for ad_conv should be 3x3'
- assert padding == dilation, 'padding for ad_conv set wrong'
-
- shape = weights.shape
- weights = weights.view(shape[0], shape[1], -1)
- weights_conv = (weights - weights[:, :, [3, 0, 1, 6, 4, 2, 7, 8, 5]]).view(shape) # clock-wise
- y = F.conv2d(x, weights_conv, bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
- return y
- return func
- elif op_type == 'rd':
- def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1):
- assert dilation in [1, 2], 'dilation for rd_conv should be in 1 or 2'
- assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for rd_conv should be 3x3'
- padding = 2 * dilation
-
- shape = weights.shape
- if weights.is_cuda:
- buffer = torch.cuda.FloatTensor(shape[0], shape[1], 5 * 5).fill_(0)
- else:
- buffer = torch.zeros(shape[0], shape[1], 5 * 5)
- weights = weights.view(shape[0], shape[1], -1)
- buffer[:, :, [0, 2, 4, 10, 14, 20, 22, 24]] = weights[:, :, 1:]
- buffer[:, :, [6, 7, 8, 11, 13, 16, 17, 18]] = -weights[:, :, 1:]
- buffer[:, :, 12] = 0
- buffer = buffer.view(shape[0], shape[1], 5, 5)
- y = F.conv2d(x, buffer, bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
- return y
- return func
- else:
- print('impossible to be here unless you force that')
- return None
-
-class Conv2d(nn.Module):
- def __init__(self, pdc, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):
- super(Conv2d, self).__init__()
- if in_channels % groups != 0:
- raise ValueError('in_channels must be divisible by groups')
- if out_channels % groups != 0:
- raise ValueError('out_channels must be divisible by groups')
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.stride = stride
- self.padding = padding
- self.dilation = dilation
- self.groups = groups
- self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, kernel_size, kernel_size))
- if bias:
- self.bias = nn.Parameter(torch.Tensor(out_channels))
- else:
- self.register_parameter('bias', None)
- self.reset_parameters()
- self.pdc = pdc
-
- def reset_parameters(self):
- nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
- if self.bias is not None:
- fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
- bound = 1 / math.sqrt(fan_in)
- nn.init.uniform_(self.bias, -bound, bound)
-
- def forward(self, input):
-
- return self.pdc(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
-
-class CSAM(nn.Module):
- """
- Compact Spatial Attention Module
- """
- def __init__(self, channels):
- super(CSAM, self).__init__()
-
- mid_channels = 4
- self.relu1 = nn.ReLU()
- self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, padding=0)
- self.conv2 = nn.Conv2d(mid_channels, 1, kernel_size=3, padding=1, bias=False)
- self.sigmoid = nn.Sigmoid()
- nn.init.constant_(self.conv1.bias, 0)
-
- def forward(self, x):
- y = self.relu1(x)
- y = self.conv1(y)
- y = self.conv2(y)
- y = self.sigmoid(y)
-
- return x * y
-
-class CDCM(nn.Module):
- """
- Compact Dilation Convolution based Module
- """
- def __init__(self, in_channels, out_channels):
- super(CDCM, self).__init__()
-
- self.relu1 = nn.ReLU()
- self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
- self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=5, padding=5, bias=False)
- self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=7, padding=7, bias=False)
- self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=9, padding=9, bias=False)
- self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=11, padding=11, bias=False)
- nn.init.constant_(self.conv1.bias, 0)
-
- def forward(self, x):
- x = self.relu1(x)
- x = self.conv1(x)
- x1 = self.conv2_1(x)
- x2 = self.conv2_2(x)
- x3 = self.conv2_3(x)
- x4 = self.conv2_4(x)
- return x1 + x2 + x3 + x4
-
-
-class MapReduce(nn.Module):
- """
- Reduce feature maps into a single edge map
- """
- def __init__(self, channels):
- super(MapReduce, self).__init__()
- self.conv = nn.Conv2d(channels, 1, kernel_size=1, padding=0)
- nn.init.constant_(self.conv.bias, 0)
-
- def forward(self, x):
- return self.conv(x)
-
-
-class PDCBlock(nn.Module):
- def __init__(self, pdc, inplane, ouplane, stride=1):
- super(PDCBlock, self).__init__()
- self.stride=stride
-
- self.stride=stride
- if self.stride > 1:
- self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
- self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0)
- self.conv1 = Conv2d(pdc, inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
- self.relu2 = nn.ReLU()
- self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False)
-
- def forward(self, x):
- if self.stride > 1:
- x = self.pool(x)
- y = self.conv1(x)
- y = self.relu2(y)
- y = self.conv2(y)
- if self.stride > 1:
- x = self.shortcut(x)
- y = y + x
- return y
-
-class PDCBlock_converted(nn.Module):
- """
- CPDC, APDC can be converted to vanilla 3x3 convolution
- RPDC can be converted to vanilla 5x5 convolution
- """
- def __init__(self, pdc, inplane, ouplane, stride=1):
- super(PDCBlock_converted, self).__init__()
- self.stride=stride
-
- if self.stride > 1:
- self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
- self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0)
- if pdc == 'rd':
- self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding=2, groups=inplane, bias=False)
- else:
- self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
- self.relu2 = nn.ReLU()
- self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False)
-
- def forward(self, x):
- if self.stride > 1:
- x = self.pool(x)
- y = self.conv1(x)
- y = self.relu2(y)
- y = self.conv2(y)
- if self.stride > 1:
- x = self.shortcut(x)
- y = y + x
- return y
-
-class PiDiNet(nn.Module):
- def __init__(self, inplane, pdcs, dil=None, sa=False, convert=False):
- super(PiDiNet, self).__init__()
- self.sa = sa
- if dil is not None:
- assert isinstance(dil, int), 'dil should be an int'
- self.dil = dil
-
- self.fuseplanes = []
-
- self.inplane = inplane
- if convert:
- if pdcs[0] == 'rd':
- init_kernel_size = 5
- init_padding = 2
- else:
- init_kernel_size = 3
- init_padding = 1
- self.init_block = nn.Conv2d(3, self.inplane,
- kernel_size=init_kernel_size, padding=init_padding, bias=False)
- block_class = PDCBlock_converted
- else:
- self.init_block = Conv2d(pdcs[0], 3, self.inplane, kernel_size=3, padding=1)
- block_class = PDCBlock
-
- self.block1_1 = block_class(pdcs[1], self.inplane, self.inplane)
- self.block1_2 = block_class(pdcs[2], self.inplane, self.inplane)
- self.block1_3 = block_class(pdcs[3], self.inplane, self.inplane)
- self.fuseplanes.append(self.inplane) # C
-
- inplane = self.inplane
- self.inplane = self.inplane * 2
- self.block2_1 = block_class(pdcs[4], inplane, self.inplane, stride=2)
- self.block2_2 = block_class(pdcs[5], self.inplane, self.inplane)
- self.block2_3 = block_class(pdcs[6], self.inplane, self.inplane)
- self.block2_4 = block_class(pdcs[7], self.inplane, self.inplane)
- self.fuseplanes.append(self.inplane) # 2C
-
- inplane = self.inplane
- self.inplane = self.inplane * 2
- self.block3_1 = block_class(pdcs[8], inplane, self.inplane, stride=2)
- self.block3_2 = block_class(pdcs[9], self.inplane, self.inplane)
- self.block3_3 = block_class(pdcs[10], self.inplane, self.inplane)
- self.block3_4 = block_class(pdcs[11], self.inplane, self.inplane)
- self.fuseplanes.append(self.inplane) # 4C
-
- self.block4_1 = block_class(pdcs[12], self.inplane, self.inplane, stride=2)
- self.block4_2 = block_class(pdcs[13], self.inplane, self.inplane)
- self.block4_3 = block_class(pdcs[14], self.inplane, self.inplane)
- self.block4_4 = block_class(pdcs[15], self.inplane, self.inplane)
- self.fuseplanes.append(self.inplane) # 4C
-
- self.conv_reduces = nn.ModuleList()
- if self.sa and self.dil is not None:
- self.attentions = nn.ModuleList()
- self.dilations = nn.ModuleList()
- for i in range(4):
- self.dilations.append(CDCM(self.fuseplanes[i], self.dil))
- self.attentions.append(CSAM(self.dil))
- self.conv_reduces.append(MapReduce(self.dil))
- elif self.sa:
- self.attentions = nn.ModuleList()
- for i in range(4):
- self.attentions.append(CSAM(self.fuseplanes[i]))
- self.conv_reduces.append(MapReduce(self.fuseplanes[i]))
- elif self.dil is not None:
- self.dilations = nn.ModuleList()
- for i in range(4):
- self.dilations.append(CDCM(self.fuseplanes[i], self.dil))
- self.conv_reduces.append(MapReduce(self.dil))
- else:
- for i in range(4):
- self.conv_reduces.append(MapReduce(self.fuseplanes[i]))
-
- self.classifier = nn.Conv2d(4, 1, kernel_size=1) # has bias
- nn.init.constant_(self.classifier.weight, 0.25)
- nn.init.constant_(self.classifier.bias, 0)
-
- # print('initialization done')
-
- def get_weights(self):
- conv_weights = []
- bn_weights = []
- relu_weights = []
- for pname, p in self.named_parameters():
- if 'bn' in pname:
- bn_weights.append(p)
- elif 'relu' in pname:
- relu_weights.append(p)
- else:
- conv_weights.append(p)
-
- return conv_weights, bn_weights, relu_weights
-
- def forward(self, x):
- H, W = x.size()[2:]
-
- x = self.init_block(x)
-
- x1 = self.block1_1(x)
- x1 = self.block1_2(x1)
- x1 = self.block1_3(x1)
-
- x2 = self.block2_1(x1)
- x2 = self.block2_2(x2)
- x2 = self.block2_3(x2)
- x2 = self.block2_4(x2)
-
- x3 = self.block3_1(x2)
- x3 = self.block3_2(x3)
- x3 = self.block3_3(x3)
- x3 = self.block3_4(x3)
-
- x4 = self.block4_1(x3)
- x4 = self.block4_2(x4)
- x4 = self.block4_3(x4)
- x4 = self.block4_4(x4)
-
- x_fuses = []
- if self.sa and self.dil is not None:
- for i, xi in enumerate([x1, x2, x3, x4]):
- x_fuses.append(self.attentions[i](self.dilations[i](xi)))
- elif self.sa:
- for i, xi in enumerate([x1, x2, x3, x4]):
- x_fuses.append(self.attentions[i](xi))
- elif self.dil is not None:
- for i, xi in enumerate([x1, x2, x3, x4]):
- x_fuses.append(self.dilations[i](xi))
- else:
- x_fuses = [x1, x2, x3, x4]
-
- e1 = self.conv_reduces[0](x_fuses[0])
- e1 = F.interpolate(e1, (H, W), mode="bilinear", align_corners=False)
-
- e2 = self.conv_reduces[1](x_fuses[1])
- e2 = F.interpolate(e2, (H, W), mode="bilinear", align_corners=False)
-
- e3 = self.conv_reduces[2](x_fuses[2])
- e3 = F.interpolate(e3, (H, W), mode="bilinear", align_corners=False)
-
- e4 = self.conv_reduces[3](x_fuses[3])
- e4 = F.interpolate(e4, (H, W), mode="bilinear", align_corners=False)
-
- outputs = [e1, e2, e3, e4]
-
- output = self.classifier(torch.cat(outputs, dim=1))
- #if not self.training:
- # return torch.sigmoid(output)
-
- outputs.append(output)
- outputs = [torch.sigmoid(r) for r in outputs]
- return outputs
-
-def config_model(model):
- model_options = list(nets.keys())
- assert model in model_options, \
- 'unrecognized model, please choose from %s' % str(model_options)
-
- # print(str(nets[model]))
-
- pdcs = []
- for i in range(16):
- layer_name = 'layer%d' % i
- op = nets[model][layer_name]
- pdcs.append(createConvFunc(op))
-
- return pdcs
-
-def pidinet():
- pdcs = config_model('carv4')
- dil = 24 #if args.dil else None
- return PiDiNet(60, pdcs, dil=dil, sa=True)
-
-
-if __name__ == '__main__':
- model = pidinet()
-# ckp = torch.load('table5_pidinet.pth')['state_dict']
- ckp = torch.load('table5_pidinet.pth', map_location=torch.device('cpu'))['state_dict']
- model.load_state_dict({k.replace('module.',''):v for k, v in ckp.items()})
- im = cv2.imread('examples/test_my/cat_v4.png')
- im = img2tensor(im).unsqueeze(0)/255.
- res = model(im)[-1]
- res = res>0.5
- res = res.float()
- res = (res[0,0].cpu().data.numpy()*255.).astype(np.uint8)
- print(res.shape)
- cv2.imwrite('edge.png', res)
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/custom.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/custom.py
deleted file mode 100644
index d8eb2a709cc7a3a68fc6a1e3a1ad98faef4c5b7b..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/custom.py
+++ /dev/null
@@ -1,400 +0,0 @@
-import os
-import os.path as osp
-from collections import OrderedDict
-from functools import reduce
-
-import annotator.uniformer.mmcv as mmcv
-import numpy as np
-from annotator.uniformer.mmcv.utils import print_log
-from prettytable import PrettyTable
-from torch.utils.data import Dataset
-
-from annotator.uniformer.mmseg.core import eval_metrics
-from annotator.uniformer.mmseg.utils import get_root_logger
-from .builder import DATASETS
-from .pipelines import Compose
-
-
-@DATASETS.register_module()
-class CustomDataset(Dataset):
- """Custom dataset for semantic segmentation. An example of file structure
- is as followed.
-
- .. code-block:: none
-
- ├── data
- │ ├── my_dataset
- │ │ ├── img_dir
- │ │ │ ├── train
- │ │ │ │ ├── xxx{img_suffix}
- │ │ │ │ ├── yyy{img_suffix}
- │ │ │ │ ├── zzz{img_suffix}
- │ │ │ ├── val
- │ │ ├── ann_dir
- │ │ │ ├── train
- │ │ │ │ ├── xxx{seg_map_suffix}
- │ │ │ │ ├── yyy{seg_map_suffix}
- │ │ │ │ ├── zzz{seg_map_suffix}
- │ │ │ ├── val
-
- The img/gt_semantic_seg pair of CustomDataset should be of the same
- except suffix. A valid img/gt_semantic_seg filename pair should be like
- ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
- in the suffix). If split is given, then ``xxx`` is specified in txt file.
- Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
- Please refer to ``docs/tutorials/new_dataset.md`` for more details.
-
-
- Args:
- pipeline (list[dict]): Processing pipeline
- img_dir (str): Path to image directory
- img_suffix (str): Suffix of images. Default: '.jpg'
- ann_dir (str, optional): Path to annotation directory. Default: None
- seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
- split (str, optional): Split txt file. If split is specified, only
- file with suffix in the splits will be loaded. Otherwise, all
- images in img_dir/ann_dir will be loaded. Default: None
- data_root (str, optional): Data root for img_dir/ann_dir. Default:
- None.
- test_mode (bool): If test_mode=True, gt wouldn't be loaded.
- ignore_index (int): The label index to be ignored. Default: 255
- reduce_zero_label (bool): Whether to mark label zero as ignored.
- Default: False
- classes (str | Sequence[str], optional): Specify classes to load.
- If is None, ``cls.CLASSES`` will be used. Default: None.
- palette (Sequence[Sequence[int]]] | np.ndarray | None):
- The palette of segmentation map. If None is given, and
- self.PALETTE is None, random palette will be generated.
- Default: None
- """
-
- CLASSES = None
-
- PALETTE = None
-
- def __init__(self,
- pipeline,
- img_dir,
- img_suffix='.jpg',
- ann_dir=None,
- seg_map_suffix='.png',
- split=None,
- data_root=None,
- test_mode=False,
- ignore_index=255,
- reduce_zero_label=False,
- classes=None,
- palette=None):
- self.pipeline = Compose(pipeline)
- self.img_dir = img_dir
- self.img_suffix = img_suffix
- self.ann_dir = ann_dir
- self.seg_map_suffix = seg_map_suffix
- self.split = split
- self.data_root = data_root
- self.test_mode = test_mode
- self.ignore_index = ignore_index
- self.reduce_zero_label = reduce_zero_label
- self.label_map = None
- self.CLASSES, self.PALETTE = self.get_classes_and_palette(
- classes, palette)
-
- # join paths if data_root is specified
- if self.data_root is not None:
- if not osp.isabs(self.img_dir):
- self.img_dir = osp.join(self.data_root, self.img_dir)
- if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
- self.ann_dir = osp.join(self.data_root, self.ann_dir)
- if not (self.split is None or osp.isabs(self.split)):
- self.split = osp.join(self.data_root, self.split)
-
- # load annotations
- self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
- self.ann_dir,
- self.seg_map_suffix, self.split)
-
- def __len__(self):
- """Total number of samples of data."""
- return len(self.img_infos)
-
- def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
- split):
- """Load annotation from directory.
-
- Args:
- img_dir (str): Path to image directory
- img_suffix (str): Suffix of images.
- ann_dir (str|None): Path to annotation directory.
- seg_map_suffix (str|None): Suffix of segmentation maps.
- split (str|None): Split txt file. If split is specified, only file
- with suffix in the splits will be loaded. Otherwise, all images
- in img_dir/ann_dir will be loaded. Default: None
-
- Returns:
- list[dict]: All image info of dataset.
- """
-
- img_infos = []
- if split is not None:
- with open(split) as f:
- for line in f:
- img_name = line.strip()
- img_info = dict(filename=img_name + img_suffix)
- if ann_dir is not None:
- seg_map = img_name + seg_map_suffix
- img_info['ann'] = dict(seg_map=seg_map)
- img_infos.append(img_info)
- else:
- for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
- img_info = dict(filename=img)
- if ann_dir is not None:
- seg_map = img.replace(img_suffix, seg_map_suffix)
- img_info['ann'] = dict(seg_map=seg_map)
- img_infos.append(img_info)
-
- print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
- return img_infos
-
- def get_ann_info(self, idx):
- """Get annotation by index.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- dict: Annotation info of specified index.
- """
-
- return self.img_infos[idx]['ann']
-
- def pre_pipeline(self, results):
- """Prepare results dict for pipeline."""
- results['seg_fields'] = []
- results['img_prefix'] = self.img_dir
- results['seg_prefix'] = self.ann_dir
- if self.custom_classes:
- results['label_map'] = self.label_map
-
- def __getitem__(self, idx):
- """Get training/test data after pipeline.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- dict: Training/test data (with annotation if `test_mode` is set
- False).
- """
-
- if self.test_mode:
- return self.prepare_test_img(idx)
- else:
- return self.prepare_train_img(idx)
-
- def prepare_train_img(self, idx):
- """Get training data and annotations after pipeline.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- dict: Training data and annotation after pipeline with new keys
- introduced by pipeline.
- """
-
- img_info = self.img_infos[idx]
- ann_info = self.get_ann_info(idx)
- results = dict(img_info=img_info, ann_info=ann_info)
- self.pre_pipeline(results)
- return self.pipeline(results)
-
- def prepare_test_img(self, idx):
- """Get testing data after pipeline.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- dict: Testing data after pipeline with new keys introduced by
- pipeline.
- """
-
- img_info = self.img_infos[idx]
- results = dict(img_info=img_info)
- self.pre_pipeline(results)
- return self.pipeline(results)
-
- def format_results(self, results, **kwargs):
- """Place holder to format result to dataset specific output."""
-
- def get_gt_seg_maps(self, efficient_test=False):
- """Get ground truth segmentation maps for evaluation."""
- gt_seg_maps = []
- for img_info in self.img_infos:
- seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
- if efficient_test:
- gt_seg_map = seg_map
- else:
- gt_seg_map = mmcv.imread(
- seg_map, flag='unchanged', backend='pillow')
- gt_seg_maps.append(gt_seg_map)
- return gt_seg_maps
-
- def get_classes_and_palette(self, classes=None, palette=None):
- """Get class names of current dataset.
-
- Args:
- classes (Sequence[str] | str | None): If classes is None, use
- default CLASSES defined by builtin dataset. If classes is a
- string, take it as a file name. The file contains the name of
- classes where each line contains one class name. If classes is
- a tuple or list, override the CLASSES defined by the dataset.
- palette (Sequence[Sequence[int]]] | np.ndarray | None):
- The palette of segmentation map. If None is given, random
- palette will be generated. Default: None
- """
- if classes is None:
- self.custom_classes = False
- return self.CLASSES, self.PALETTE
-
- self.custom_classes = True
- if isinstance(classes, str):
- # take it as a file path
- class_names = mmcv.list_from_file(classes)
- elif isinstance(classes, (tuple, list)):
- class_names = classes
- else:
- raise ValueError(f'Unsupported type {type(classes)} of classes.')
-
- if self.CLASSES:
- if not set(classes).issubset(self.CLASSES):
- raise ValueError('classes is not a subset of CLASSES.')
-
- # dictionary, its keys are the old label ids and its values
- # are the new label ids.
- # used for changing pixel labels in load_annotations.
- self.label_map = {}
- for i, c in enumerate(self.CLASSES):
- if c not in class_names:
- self.label_map[i] = -1
- else:
- self.label_map[i] = classes.index(c)
-
- palette = self.get_palette_for_custom_classes(class_names, palette)
-
- return class_names, palette
-
- def get_palette_for_custom_classes(self, class_names, palette=None):
-
- if self.label_map is not None:
- # return subset of palette
- palette = []
- for old_id, new_id in sorted(
- self.label_map.items(), key=lambda x: x[1]):
- if new_id != -1:
- palette.append(self.PALETTE[old_id])
- palette = type(self.PALETTE)(palette)
-
- elif palette is None:
- if self.PALETTE is None:
- palette = np.random.randint(0, 255, size=(len(class_names), 3))
- else:
- palette = self.PALETTE
-
- return palette
-
- def evaluate(self,
- results,
- metric='mIoU',
- logger=None,
- efficient_test=False,
- **kwargs):
- """Evaluate the dataset.
-
- Args:
- results (list): Testing results of the dataset.
- metric (str | list[str]): Metrics to be evaluated. 'mIoU',
- 'mDice' and 'mFscore' are supported.
- logger (logging.Logger | None | str): Logger used for printing
- related information during evaluation. Default: None.
-
- Returns:
- dict[str, float]: Default metrics.
- """
-
- if isinstance(metric, str):
- metric = [metric]
- allowed_metrics = ['mIoU', 'mDice', 'mFscore']
- if not set(metric).issubset(set(allowed_metrics)):
- raise KeyError('metric {} is not supported'.format(metric))
- eval_results = {}
- gt_seg_maps = self.get_gt_seg_maps(efficient_test)
- if self.CLASSES is None:
- num_classes = len(
- reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
- else:
- num_classes = len(self.CLASSES)
- ret_metrics = eval_metrics(
- results,
- gt_seg_maps,
- num_classes,
- self.ignore_index,
- metric,
- label_map=self.label_map,
- reduce_zero_label=self.reduce_zero_label)
-
- if self.CLASSES is None:
- class_names = tuple(range(num_classes))
- else:
- class_names = self.CLASSES
-
- # summary table
- ret_metrics_summary = OrderedDict({
- ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
- for ret_metric, ret_metric_value in ret_metrics.items()
- })
-
- # each class table
- ret_metrics.pop('aAcc', None)
- ret_metrics_class = OrderedDict({
- ret_metric: np.round(ret_metric_value * 100, 2)
- for ret_metric, ret_metric_value in ret_metrics.items()
- })
- ret_metrics_class.update({'Class': class_names})
- ret_metrics_class.move_to_end('Class', last=False)
-
- # for logger
- class_table_data = PrettyTable()
- for key, val in ret_metrics_class.items():
- class_table_data.add_column(key, val)
-
- summary_table_data = PrettyTable()
- for key, val in ret_metrics_summary.items():
- if key == 'aAcc':
- summary_table_data.add_column(key, [val])
- else:
- summary_table_data.add_column('m' + key, [val])
-
- print_log('per class results:', logger)
- print_log('\n' + class_table_data.get_string(), logger=logger)
- print_log('Summary:', logger)
- print_log('\n' + summary_table_data.get_string(), logger=logger)
-
- # each metric dict
- for key, value in ret_metrics_summary.items():
- if key == 'aAcc':
- eval_results[key] = value / 100.0
- else:
- eval_results['m' + key] = value / 100.0
-
- ret_metrics_class.pop('Class', None)
- for key, value in ret_metrics_class.items():
- eval_results.update({
- key + '.' + str(name): value[idx] / 100.0
- for idx, name in enumerate(class_names)
- })
-
- if mmcv.is_list_of(results, str):
- for file_name in results:
- os.remove(file_name)
- return eval_results
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/losses/lovasz_loss.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/losses/lovasz_loss.py
deleted file mode 100644
index 6badb67f6d987b59fb07aa97caaaf89896e27a8d..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/losses/lovasz_loss.py
+++ /dev/null
@@ -1,303 +0,0 @@
-"""Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor
-ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim
-Berman 2018 ESAT-PSI KU Leuven (MIT License)"""
-
-import annotator.uniformer.mmcv as mmcv
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..builder import LOSSES
-from .utils import get_class_weight, weight_reduce_loss
-
-
-def lovasz_grad(gt_sorted):
- """Computes gradient of the Lovasz extension w.r.t sorted errors.
-
- See Alg. 1 in paper.
- """
- p = len(gt_sorted)
- gts = gt_sorted.sum()
- intersection = gts - gt_sorted.float().cumsum(0)
- union = gts + (1 - gt_sorted).float().cumsum(0)
- jaccard = 1. - intersection / union
- if p > 1: # cover 1-pixel case
- jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
- return jaccard
-
-
-def flatten_binary_logits(logits, labels, ignore_index=None):
- """Flattens predictions in the batch (binary case) Remove labels equal to
- 'ignore_index'."""
- logits = logits.view(-1)
- labels = labels.view(-1)
- if ignore_index is None:
- return logits, labels
- valid = (labels != ignore_index)
- vlogits = logits[valid]
- vlabels = labels[valid]
- return vlogits, vlabels
-
-
-def flatten_probs(probs, labels, ignore_index=None):
- """Flattens predictions in the batch."""
- if probs.dim() == 3:
- # assumes output of a sigmoid layer
- B, H, W = probs.size()
- probs = probs.view(B, 1, H, W)
- B, C, H, W = probs.size()
- probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C) # B*H*W, C=P,C
- labels = labels.view(-1)
- if ignore_index is None:
- return probs, labels
- valid = (labels != ignore_index)
- vprobs = probs[valid.nonzero().squeeze()]
- vlabels = labels[valid]
- return vprobs, vlabels
-
-
-def lovasz_hinge_flat(logits, labels):
- """Binary Lovasz hinge loss.
-
- Args:
- logits (torch.Tensor): [P], logits at each prediction
- (between -infty and +infty).
- labels (torch.Tensor): [P], binary ground truth labels (0 or 1).
-
- Returns:
- torch.Tensor: The calculated loss.
- """
- if len(labels) == 0:
- # only void pixels, the gradients should be 0
- return logits.sum() * 0.
- signs = 2. * labels.float() - 1.
- errors = (1. - logits * signs)
- errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
- perm = perm.data
- gt_sorted = labels[perm]
- grad = lovasz_grad(gt_sorted)
- loss = torch.dot(F.relu(errors_sorted), grad)
- return loss
-
-
-def lovasz_hinge(logits,
- labels,
- classes='present',
- per_image=False,
- class_weight=None,
- reduction='mean',
- avg_factor=None,
- ignore_index=255):
- """Binary Lovasz hinge loss.
-
- Args:
- logits (torch.Tensor): [B, H, W], logits at each pixel
- (between -infty and +infty).
- labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1).
- classes (str | list[int], optional): Placeholder, to be consistent with
- other loss. Default: None.
- per_image (bool, optional): If per_image is True, compute the loss per
- image instead of per batch. Default: False.
- class_weight (list[float], optional): Placeholder, to be consistent
- with other loss. Default: None.
- reduction (str, optional): The method used to reduce the loss. Options
- are "none", "mean" and "sum". This parameter only works when
- per_image is True. Default: 'mean'.
- avg_factor (int, optional): Average factor that is used to average
- the loss. This parameter only works when per_image is True.
- Default: None.
- ignore_index (int | None): The label index to be ignored. Default: 255.
-
- Returns:
- torch.Tensor: The calculated loss.
- """
- if per_image:
- loss = [
- lovasz_hinge_flat(*flatten_binary_logits(
- logit.unsqueeze(0), label.unsqueeze(0), ignore_index))
- for logit, label in zip(logits, labels)
- ]
- loss = weight_reduce_loss(
- torch.stack(loss), None, reduction, avg_factor)
- else:
- loss = lovasz_hinge_flat(
- *flatten_binary_logits(logits, labels, ignore_index))
- return loss
-
-
-def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
- """Multi-class Lovasz-Softmax loss.
-
- Args:
- probs (torch.Tensor): [P, C], class probabilities at each prediction
- (between 0 and 1).
- labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
- classes (str | list[int], optional): Classes chosen to calculate loss.
- 'all' for all classes, 'present' for classes present in labels, or
- a list of classes to average. Default: 'present'.
- class_weight (list[float], optional): The weight for each class.
- Default: None.
-
- Returns:
- torch.Tensor: The calculated loss.
- """
- if probs.numel() == 0:
- # only void pixels, the gradients should be 0
- return probs * 0.
- C = probs.size(1)
- losses = []
- class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
- for c in class_to_sum:
- fg = (labels == c).float() # foreground for class c
- if (classes == 'present' and fg.sum() == 0):
- continue
- if C == 1:
- if len(classes) > 1:
- raise ValueError('Sigmoid output possible only with 1 class')
- class_pred = probs[:, 0]
- else:
- class_pred = probs[:, c]
- errors = (fg - class_pred).abs()
- errors_sorted, perm = torch.sort(errors, 0, descending=True)
- perm = perm.data
- fg_sorted = fg[perm]
- loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted))
- if class_weight is not None:
- loss *= class_weight[c]
- losses.append(loss)
- return torch.stack(losses).mean()
-
-
-def lovasz_softmax(probs,
- labels,
- classes='present',
- per_image=False,
- class_weight=None,
- reduction='mean',
- avg_factor=None,
- ignore_index=255):
- """Multi-class Lovasz-Softmax loss.
-
- Args:
- probs (torch.Tensor): [B, C, H, W], class probabilities at each
- prediction (between 0 and 1).
- labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
- C - 1).
- classes (str | list[int], optional): Classes chosen to calculate loss.
- 'all' for all classes, 'present' for classes present in labels, or
- a list of classes to average. Default: 'present'.
- per_image (bool, optional): If per_image is True, compute the loss per
- image instead of per batch. Default: False.
- class_weight (list[float], optional): The weight for each class.
- Default: None.
- reduction (str, optional): The method used to reduce the loss. Options
- are "none", "mean" and "sum". This parameter only works when
- per_image is True. Default: 'mean'.
- avg_factor (int, optional): Average factor that is used to average
- the loss. This parameter only works when per_image is True.
- Default: None.
- ignore_index (int | None): The label index to be ignored. Default: 255.
-
- Returns:
- torch.Tensor: The calculated loss.
- """
-
- if per_image:
- loss = [
- lovasz_softmax_flat(
- *flatten_probs(
- prob.unsqueeze(0), label.unsqueeze(0), ignore_index),
- classes=classes,
- class_weight=class_weight)
- for prob, label in zip(probs, labels)
- ]
- loss = weight_reduce_loss(
- torch.stack(loss), None, reduction, avg_factor)
- else:
- loss = lovasz_softmax_flat(
- *flatten_probs(probs, labels, ignore_index),
- classes=classes,
- class_weight=class_weight)
- return loss
-
-
-@LOSSES.register_module()
-class LovaszLoss(nn.Module):
- """LovaszLoss.
-
- This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate
- for the optimization of the intersection-over-union measure in neural
- networks `_.
-
- Args:
- loss_type (str, optional): Binary or multi-class loss.
- Default: 'multi_class'. Options are "binary" and "multi_class".
- classes (str | list[int], optional): Classes chosen to calculate loss.
- 'all' for all classes, 'present' for classes present in labels, or
- a list of classes to average. Default: 'present'.
- per_image (bool, optional): If per_image is True, compute the loss per
- image instead of per batch. Default: False.
- reduction (str, optional): The method used to reduce the loss. Options
- are "none", "mean" and "sum". This parameter only works when
- per_image is True. Default: 'mean'.
- class_weight (list[float] | str, optional): Weight of each class. If in
- str format, read them from a file. Defaults to None.
- loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
- """
-
- def __init__(self,
- loss_type='multi_class',
- classes='present',
- per_image=False,
- reduction='mean',
- class_weight=None,
- loss_weight=1.0):
- super(LovaszLoss, self).__init__()
- assert loss_type in ('binary', 'multi_class'), "loss_type should be \
- 'binary' or 'multi_class'."
-
- if loss_type == 'binary':
- self.cls_criterion = lovasz_hinge
- else:
- self.cls_criterion = lovasz_softmax
- assert classes in ('all', 'present') or mmcv.is_list_of(classes, int)
- if not per_image:
- assert reduction == 'none', "reduction should be 'none' when \
- per_image is False."
-
- self.classes = classes
- self.per_image = per_image
- self.reduction = reduction
- self.loss_weight = loss_weight
- self.class_weight = get_class_weight(class_weight)
-
- def forward(self,
- cls_score,
- label,
- weight=None,
- avg_factor=None,
- reduction_override=None,
- **kwargs):
- """Forward function."""
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if self.class_weight is not None:
- class_weight = cls_score.new_tensor(self.class_weight)
- else:
- class_weight = None
-
- # if multi-class loss, transform logits to probs
- if self.cls_criterion == lovasz_softmax:
- cls_score = F.softmax(cls_score, dim=1)
-
- loss_cls = self.loss_weight * self.cls_criterion(
- cls_score,
- label,
- self.classes,
- self.per_image,
- class_weight=class_weight,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss_cls
diff --git a/spaces/cozyanduofen/bingo/src/components/tailwind-indicator.tsx b/spaces/cozyanduofen/bingo/src/components/tailwind-indicator.tsx
deleted file mode 100644
index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000
--- a/spaces/cozyanduofen/bingo/src/components/tailwind-indicator.tsx
+++ /dev/null
@@ -1,14 +0,0 @@
-export function TailwindIndicator() {
- if (process.env.NODE_ENV === 'production') return null
-
- return (
-
-
xs
-
sm
-
md
-
lg
-
xl
-
2xl
-
- )
-}
diff --git a/spaces/cozyanduofen/bingo/src/lib/storage.ts b/spaces/cozyanduofen/bingo/src/lib/storage.ts
deleted file mode 100644
index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000
--- a/spaces/cozyanduofen/bingo/src/lib/storage.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { getMany, set, del, clear } from 'idb-keyval';
-
-export const Storage = {
- async get(key: string | string[] | null): Promise {
- if (key === null) return null;
- if (typeof key === 'string') {
- key = [key]
- }
- const returnData: Record = {}
- const values = await getMany(key)
- key.forEach((k, idx)=> {
- returnData[k] = values[idx]
- })
- return returnData;
- },
- async set(object: any) {
- for (let key of Object.keys(object)) {
- await set(key, object[key])
- }
- },
- async remove(key: string) {
- return del(key);
- },
- async clear() {
- return clear();
- }
-}
diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/utils/__init__.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/utils/__init__.py
deleted file mode 100644
index 5fcc1d540462712387523d1e326d1dfc2bcfbf32..0000000000000000000000000000000000000000
--- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/utils/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from .file_client import FileClient
-from .img_util import crop_border, imfrombytes, img2tensor, imwrite, tensor2img
-from .logger import MessageLogger, get_env_info, get_root_logger, init_tb_logger, init_wandb_logger
-from .misc import check_resume, get_time_str, make_exp_dirs, mkdir_and_rename, scandir, set_random_seed, sizeof_fmt
-
-__all__ = [
- # file_client.py
- 'FileClient',
- # img_util.py
- 'img2tensor',
- 'tensor2img',
- 'imfrombytes',
- 'imwrite',
- 'crop_border',
- # logger.py
- 'MessageLogger',
- 'init_tb_logger',
- 'init_wandb_logger',
- 'get_root_logger',
- 'get_env_info',
- # misc.py
- 'set_random_seed',
- 'get_time_str',
- 'mkdir_and_rename',
- 'make_exp_dirs',
- 'scandir',
- 'check_resume',
- 'sizeof_fmt'
-]
diff --git a/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_submodel.py b/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_submodel.py
deleted file mode 100644
index 559bc5e04281a7cf833a82e3cd48627b20f1a76d..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_submodel.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import torch
-import sys
-
-if __name__ == "__main__":
- inpath = sys.argv[1]
- outpath = sys.argv[2]
- submodel = "cond_stage_model"
- if len(sys.argv) > 3:
- submodel = sys.argv[3]
-
- print("Extracting {} from {} to {}.".format(submodel, inpath, outpath))
-
- sd = torch.load(inpath, map_location="cpu")
- new_sd = {"state_dict": dict((k.split(".", 1)[-1],v)
- for k,v in sd["state_dict"].items()
- if k.startswith("cond_stage_model"))}
- torch.save(new_sd, outpath)
diff --git a/spaces/daarumadx/bot/src/argv/run/config.py b/spaces/daarumadx/bot/src/argv/run/config.py
deleted file mode 100644
index e70d529dc699502962b1deea459582aba5c867f6..0000000000000000000000000000000000000000
--- a/spaces/daarumadx/bot/src/argv/run/config.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-
-import gpu_info
-from argv.checkpoints import set_arg_checkpoints, check_arg_checkpoints
-from utils import check_image_file_validity, is_a_supported_image_file_extension, check_url
-from loader import Loader
-from loader.fs import FSLoader
-from loader.http import HTTPLoader
-
-
-def set_args_run_parser(args):
- set_arg_checkpoints(args)
- set_arg_preference(args)
- set_gpu_ids(args)
-
-
-def check_args_run_parser(parser, args):
- check_arg_input(parser, args)
- check_arg_output(parser, args)
- #check_args_altered(parser, args)
- check_arg_checkpoints(parser, args)
-
-
-def check_args_altered(parser, args):
- if args.steps and not args.altered:
- parser.error("--steps requires --altered.")
- elif args.steps and args.altered:
- if not os.path.isdir(args.altered):
- parser.error("{} directory doesn't exist.".format(args.altered))
-
-
-def set_gpu_ids(args):
- if args.cpu:
- args.gpu_ids = None
- elif args.gpu:
- args.gpu_ids = args.gpu
- else:
- args.gpu_ids = None if not gpu_info.get_info()['has_cuda'] else [0]
-
-
-def check_arg_input(parser, args):
- if not args.input:
- parser.error("-i, --input INPUT is required.")
-
- loader = Loader.get_loader(args.input)
- if loader == FSLoader:
- if os.path.isfile(args.input) and not is_a_supported_image_file_extension(args.input):
- parser.error("Input {} file not supported format.".format(args.input))
- if os.path.isfile(args.input):
- check_image_file_validity(args.input)
- elif loader == HTTPLoader:
- if not check_url(args.input):
- parser.error("Url {} of the http ressource doesn't exist or is not accesible.".format(args.input))
- if not is_a_supported_image_file_extension(args.input):
- parser.error("Url {} is not file with a supported extension format.".format(args.input))
- else:
- parser.error("Input {} is not a valid file or directory or url.".format(args.input))
- return args.input
-
-
-def check_arg_output(parser, args):
- if os.path.isfile(args.input) and not args.output:
- _, extension = os.path.splitext(args.input)
- args.output = "output{}".format(extension)
- elif args.output and os.path.isfile(args.input) and not is_a_supported_image_file_extension(args.output):
- parser.error("Output {} file not a supported format.".format(args.output))
-
-
-def set_arg_preference(args):
- args.prefs = {
- "titsize": args.bsize,
- "aursize": args.asize,
- "nipsize": args.nsize,
- "vagsize": args.vsize,
- "hairsize": args.hsize
- }
diff --git a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/ops.py b/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/ops.py
deleted file mode 100644
index ed4f285fdee447e715089e14544d5e47b90c7fb9..0000000000000000000000000000000000000000
--- a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/ops.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import torch
-import torchvision
-import torch.nn as nn
-import torch.nn.init as init
-from torch.autograd import Variable
-
-
-def linear(channel_in, channel_out,
- activation=nn.ReLU,
- normalizer=nn.BatchNorm1d):
- layer = list()
- bias = True if not normalizer else False
-
- layer.append(nn.Linear(channel_in, channel_out, bias=bias))
- _apply(layer, activation, normalizer, channel_out)
- # init.kaiming_normal(layer[0].weight)
-
- return nn.Sequential(*layer)
-
-
-def conv2d(channel_in, channel_out,
- ksize=3, stride=1, padding=1,
- activation=nn.ReLU,
- normalizer=nn.BatchNorm2d):
- layer = list()
- bias = True if not normalizer else False
-
- layer.append(nn.Conv2d(channel_in, channel_out,
- ksize, stride, padding,
- bias=bias))
- _apply(layer, activation, normalizer, channel_out)
- # init.kaiming_normal(layer[0].weight)
-
- return nn.Sequential(*layer)
-
-
-def conv_transpose2d(channel_in, channel_out,
- ksize=4, stride=2, padding=1,
- activation=nn.ReLU,
- normalizer=nn.BatchNorm2d):
- layer = list()
- bias = True if not normalizer else False
-
- layer.append(nn.ConvTranspose2d(channel_in, channel_out,
- ksize, stride, padding,
- bias=bias))
- _apply(layer, activation, normalizer, channel_out)
- # init.kaiming_normal(layer[0].weight)
-
- return nn.Sequential(*layer)
-
-
-def nn_conv2d(channel_in, channel_out,
- ksize=3, stride=1, padding=1,
- scale_factor=2,
- activation=nn.ReLU,
- normalizer=nn.BatchNorm2d):
- layer = list()
- bias = True if not normalizer else False
-
- layer.append(nn.UpsamplingNearest2d(scale_factor=scale_factor))
- layer.append(nn.Conv2d(channel_in, channel_out,
- ksize, stride, padding,
- bias=bias))
- _apply(layer, activation, normalizer, channel_out)
- # init.kaiming_normal(layer[1].weight)
-
- return nn.Sequential(*layer)
-
-
-def _apply(layer, activation, normalizer, channel_out=None):
- if normalizer:
- layer.append(normalizer(channel_out))
- if activation:
- layer.append(activation())
- return layer
-
diff --git a/spaces/datastx/ChatWithADocDocker/app.py b/spaces/datastx/ChatWithADocDocker/app.py
deleted file mode 100644
index 3a7144ed204235dd32db3eb66a291a6e174b2a69..0000000000000000000000000000000000000000
--- a/spaces/datastx/ChatWithADocDocker/app.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import os
-import gradio as gr
-import transformers
-from torch import bfloat16
-from threading import Thread
-from gradio.themes.utils.colors import Color
-
-# Download model and tokenizer files
-os.system('bash download_model.sh')
-
-model_id = "/app/medllama2_7b"
-tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
-model_config = transformers.AutoConfig.from_pretrained(model_id)
-
-bnb_config = transformers.BitsAndBytesConfig(
- load_in_4bit=True,
- bnb_4bit_quant_type='nf4',
- bnb_4bit_use_double_quant=True,
- bnb_4bit_compute_dtype=bfloat16
-)
-model = transformers.AutoModelForCausalLM.from_pretrained(
- model_id,
- trust_remote_code=True,
- config=model_config,
- quantization_config=bnb_config,
- device_map='auto'
-)
-
-prompts = ["You are a helpful AI Doctor."]
-
-def prompt_build(system_prompt, user_inp, hist):
- prompt = f"""### System:\n{system_prompt}\n\n"""
-
- for pair in hist:
- prompt += f"""### User:\n{pair[0]}\n\n### Assistant:\n{pair[1]}\n\n"""
-
- prompt += f"""### User:\n{user_inp}\n\n### Assistant:"""
- return prompt
-
-def chat(user_input, history, system_prompt):
-
- prompt = prompt_build(system_prompt, user_input, history)
- model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
-
- streamer = transformers.TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
-
- generate_kwargs = dict(
- model_inputs,
- streamer=streamer,
- max_length=2048,
- do_sample=True,
- top_p=0.95,
- temperature=0.8,
- top_k=50
- )
- t = Thread(target=model.generate, kwargs=generate_kwargs)
- t.start()
-
- model_output = ""
- for new_text in streamer:
- model_output += new_text
- yield model_output
- return model_output
-
-if __name__ == "__main__":
- with gr.Blocks() as demo:
- dropdown = gr.Dropdown(choices=prompts, label="Type your own or select a system prompt", value="You are a helpful AI Doctor.", allow_custom_value=True)
- chatbot = gr.ChatInterface(fn=chat, additional_inputs=[dropdown])
-
- demo.queue(api_open=False).launch(show_api=False, share=True)
diff --git a/spaces/dbredvick/whisper-webui/app.py b/spaces/dbredvick/whisper-webui/app.py
deleted file mode 100644
index 3cf01d86a9568242fad646a1d3c33773c5334889..0000000000000000000000000000000000000000
--- a/spaces/dbredvick/whisper-webui/app.py
+++ /dev/null
@@ -1,309 +0,0 @@
-import math
-from typing import Iterator
-import argparse
-
-from io import StringIO
-import os
-import pathlib
-import tempfile
-from src.vadParallel import ParallelContext, ParallelTranscription
-
-from src.whisperContainer import WhisperContainer, WhisperModelCache
-
-# External programs
-import ffmpeg
-
-# UI
-import gradio as gr
-
-from src.download import ExceededMaximumDuration, download_url
-from src.utils import slugify, write_srt, write_vtt
-from src.vad import AbstractTranscription, NonSpeechStrategy, PeriodicTranscriptionConfig, TranscriptionConfig, VadPeriodicTranscription, VadSileroTranscription
-
-# Limitations (set to -1 to disable)
-DEFAULT_INPUT_AUDIO_MAX_DURATION = -1 # seconds
-
-# Whether or not to automatically delete all uploaded files, to save disk space
-DELETE_UPLOADED_FILES = True
-
-# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself
-MAX_FILE_PREFIX_LENGTH = 17
-
-LANGUAGES = [
- "English", "Chinese", "German", "Spanish", "Russian", "Korean",
- "French", "Japanese", "Portuguese", "Turkish", "Polish", "Catalan",
- "Dutch", "Arabic", "Swedish", "Italian", "Indonesian", "Hindi",
- "Finnish", "Vietnamese", "Hebrew", "Ukrainian", "Greek", "Malay",
- "Czech", "Romanian", "Danish", "Hungarian", "Tamil", "Norwegian",
- "Thai", "Urdu", "Croatian", "Bulgarian", "Lithuanian", "Latin",
- "Maori", "Malayalam", "Welsh", "Slovak", "Telugu", "Persian",
- "Latvian", "Bengali", "Serbian", "Azerbaijani", "Slovenian",
- "Kannada", "Estonian", "Macedonian", "Breton", "Basque", "Icelandic",
- "Armenian", "Nepali", "Mongolian", "Bosnian", "Kazakh", "Albanian",
- "Swahili", "Galician", "Marathi", "Punjabi", "Sinhala", "Khmer",
- "Shona", "Yoruba", "Somali", "Afrikaans", "Occitan", "Georgian",
- "Belarusian", "Tajik", "Sindhi", "Gujarati", "Amharic", "Yiddish",
- "Lao", "Uzbek", "Faroese", "Haitian Creole", "Pashto", "Turkmen",
- "Nynorsk", "Maltese", "Sanskrit", "Luxembourgish", "Myanmar", "Tibetan",
- "Tagalog", "Malagasy", "Assamese", "Tatar", "Hawaiian", "Lingala",
- "Hausa", "Bashkir", "Javanese", "Sundanese"
-]
-
-class WhisperTranscriber:
- def __init__(self, input_audio_max_duration: float = DEFAULT_INPUT_AUDIO_MAX_DURATION, vad_process_timeout: float = None, delete_uploaded_files: bool = DELETE_UPLOADED_FILES):
- self.model_cache = WhisperModelCache()
- self.parallel_device_list = None
- self.parallel_context = None
- self.vad_process_timeout = vad_process_timeout
-
- self.vad_model = None
- self.inputAudioMaxDuration = input_audio_max_duration
- self.deleteUploadedFiles = delete_uploaded_files
-
- def set_parallel_devices(self, vad_parallel_devices: str):
- self.parallel_device_list = [ device.strip() for device in vad_parallel_devices.split(",") ] if vad_parallel_devices else None
-
- def transcribe_webui(self, modelName, languageName, urlData, uploadFile, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow):
- try:
- source, sourceName = self.__get_source(urlData, uploadFile, microphoneData)
-
- try:
- selectedLanguage = languageName.lower() if len(languageName) > 0 else None
- selectedModel = modelName if modelName is not None else "base"
-
- model = WhisperContainer(model_name=selectedModel, cache=self.model_cache)
-
- # Execute whisper
- result = self.transcribe_file(model, source, selectedLanguage, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
-
- # Write result
- downloadDirectory = tempfile.mkdtemp()
-
- filePrefix = slugify(sourceName, allow_unicode=True)
- download, text, vtt = self.write_result(result, filePrefix, downloadDirectory)
-
- return download, text, vtt
-
- finally:
- # Cleanup source
- if self.deleteUploadedFiles:
- print("Deleting source file " + source)
- os.remove(source)
-
- except ExceededMaximumDuration as e:
- return [], ("[ERROR]: Maximum remote video length is " + str(e.maxDuration) + "s, file was " + str(e.videoDuration) + "s"), "[ERROR]"
-
- def transcribe_file(self, model: WhisperContainer, audio_path: str, language: str, task: str = None, vad: str = None,
- vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1, **decodeOptions: dict):
-
- initial_prompt = decodeOptions.pop('initial_prompt', None)
-
- if ('task' in decodeOptions):
- task = decodeOptions.pop('task')
-
- # Callable for processing an audio file
- whisperCallable = model.create_callback(language, task, initial_prompt, **decodeOptions)
-
- # The results
- if (vad == 'silero-vad'):
- # Silero VAD where non-speech gaps are transcribed
- process_gaps = self._create_silero_config(NonSpeechStrategy.CREATE_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, process_gaps)
- elif (vad == 'silero-vad-skip-gaps'):
- # Silero VAD where non-speech gaps are simply ignored
- skip_gaps = self._create_silero_config(NonSpeechStrategy.SKIP, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, skip_gaps)
- elif (vad == 'silero-vad-expand-into-gaps'):
- # Use Silero VAD where speech-segments are expanded into non-speech gaps
- expand_gaps = self._create_silero_config(NonSpeechStrategy.EXPAND_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, expand_gaps)
- elif (vad == 'periodic-vad'):
- # Very simple VAD - mark every 5 minutes as speech. This makes it less likely that Whisper enters an infinite loop, but
- # it may create a break in the middle of a sentence, causing some artifacts.
- periodic_vad = VadPeriodicTranscription()
- period_config = PeriodicTranscriptionConfig(periodic_duration=vadMaxMergeSize, max_prompt_window=vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config)
-
- else:
- if (self._has_parallel_devices()):
- # Use a simple period transcription instead, as we need to use the parallel context
- periodic_vad = VadPeriodicTranscription()
- period_config = PeriodicTranscriptionConfig(periodic_duration=math.inf, max_prompt_window=1)
-
- result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config)
- else:
- # Default VAD
- result = whisperCallable(audio_path, 0, None, None)
-
- return result
-
- def process_vad(self, audio_path, whisperCallable, vadModel: AbstractTranscription, vadConfig: TranscriptionConfig):
- if (not self._has_parallel_devices()):
- # No parallel devices, so just run the VAD and Whisper in sequence
- return vadModel.transcribe(audio_path, whisperCallable, vadConfig)
-
- # Create parallel context if needed
- if (self.parallel_context is None):
- # Create a context wih processes and automatically clear the pool after 1 hour of inactivity
- self.parallel_context = ParallelContext(num_processes=len(self.parallel_device_list), auto_cleanup_timeout_seconds=self.vad_process_timeout)
-
- parallel_vad = ParallelTranscription()
- return parallel_vad.transcribe_parallel(transcription=vadModel, audio=audio_path, whisperCallable=whisperCallable,
- config=vadConfig, devices=self.parallel_device_list, parallel_context=self.parallel_context)
-
- def _has_parallel_devices(self):
- return self.parallel_device_list is not None and len(self.parallel_device_list) > 0
-
- def _concat_prompt(self, prompt1, prompt2):
- if (prompt1 is None):
- return prompt2
- elif (prompt2 is None):
- return prompt1
- else:
- return prompt1 + " " + prompt2
-
- def _create_silero_config(self, non_speech_strategy: NonSpeechStrategy, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1):
- # Use Silero VAD
- if (self.vad_model is None):
- self.vad_model = VadSileroTranscription()
-
- config = TranscriptionConfig(non_speech_strategy = non_speech_strategy,
- max_silent_period=vadMergeWindow, max_merge_size=vadMaxMergeSize,
- segment_padding_left=vadPadding, segment_padding_right=vadPadding,
- max_prompt_window=vadPromptWindow)
-
- return config
-
- def write_result(self, result: dict, source_name: str, output_dir: str):
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- text = result["text"]
- language = result["language"]
- languageMaxLineWidth = self.__get_max_line_width(language)
-
- print("Max line width " + str(languageMaxLineWidth))
- vtt = self.__get_subs(result["segments"], "vtt", languageMaxLineWidth)
- srt = self.__get_subs(result["segments"], "srt", languageMaxLineWidth)
-
- output_files = []
- output_files.append(self.__create_file(srt, output_dir, source_name + "-subs.srt"));
- output_files.append(self.__create_file(vtt, output_dir, source_name + "-subs.vtt"));
- output_files.append(self.__create_file(text, output_dir, source_name + "-transcript.txt"));
-
- return output_files, text, vtt
-
- def clear_cache(self):
- self.model_cache.clear()
- self.vad_model = None
-
- def __get_source(self, urlData, uploadFile, microphoneData):
- if urlData:
- # Download from YouTube
- source = download_url(urlData, self.inputAudioMaxDuration)[0]
- else:
- # File input
- source = uploadFile if uploadFile is not None else microphoneData
-
- if self.inputAudioMaxDuration > 0:
- # Calculate audio length
- audioDuration = ffmpeg.probe(source)["format"]["duration"]
-
- if float(audioDuration) > self.inputAudioMaxDuration:
- raise ExceededMaximumDuration(videoDuration=audioDuration, maxDuration=self.inputAudioMaxDuration, message="Video is too long")
-
- file_path = pathlib.Path(source)
- sourceName = file_path.stem[:MAX_FILE_PREFIX_LENGTH] + file_path.suffix
-
- return source, sourceName
-
- def __get_max_line_width(self, language: str) -> int:
- if (language and language.lower() in ["japanese", "ja", "chinese", "zh"]):
- # Chinese characters and kana are wider, so limit line length to 40 characters
- return 40
- else:
- # TODO: Add more languages
- # 80 latin characters should fit on a 1080p/720p screen
- return 80
-
- def __get_subs(self, segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
- segmentStream = StringIO()
-
- if format == 'vtt':
- write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- elif format == 'srt':
- write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- else:
- raise Exception("Unknown format " + format)
-
- segmentStream.seek(0)
- return segmentStream.read()
-
- def __create_file(self, text: str, directory: str, fileName: str) -> str:
- # Write the text to a file
- with open(os.path.join(directory, fileName), 'w+', encoding="utf-8") as file:
- file.write(text)
-
- return file.name
-
- def close(self):
- self.clear_cache()
-
- if (self.parallel_context is not None):
- self.parallel_context.close()
-
-
-def create_ui(input_audio_max_duration, share=False, server_name: str = None, server_port: int = 7860,
- default_model_name: str = "medium", default_vad: str = None, vad_parallel_devices: str = None, vad_process_timeout: float = None):
- ui = WhisperTranscriber(input_audio_max_duration, vad_process_timeout)
-
- # Specify a list of devices to use for parallel processing
- ui.set_parallel_devices(vad_parallel_devices)
-
- ui_description = "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse "
- ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition "
- ui_description += " as well as speech translation and language identification. "
-
- ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option."
-
- if input_audio_max_duration > 0:
- ui_description += "\n\n" + "Max audio file length: " + str(input_audio_max_duration) + " s"
-
- ui_article = "Read the [documentation here](https://huggingface.co/spaces/aadnk/whisper-webui/blob/main/docs/options.md)"
-
- demo = gr.Interface(fn=ui.transcribe_webui, description=ui_description, article=ui_article, inputs=[
- gr.Dropdown(choices=["tiny", "base", "small", "medium", "large"], value=default_model_name, label="Model"),
- gr.Dropdown(choices=sorted(LANGUAGES), label="Language"),
- gr.Text(label="URL (YouTube, etc.)"),
- gr.Audio(source="upload", type="filepath", label="Upload Audio"),
- gr.Audio(source="microphone", type="filepath", label="Microphone Input"),
- gr.Dropdown(choices=["transcribe", "translate"], label="Task"),
- gr.Dropdown(choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], value=default_vad, label="VAD"),
- gr.Number(label="VAD - Merge Window (s)", precision=0, value=5),
- gr.Number(label="VAD - Max Merge Size (s)", precision=0, value=30),
- gr.Number(label="VAD - Padding (s)", precision=None, value=1),
- gr.Number(label="VAD - Prompt Window (s)", precision=None, value=3)
- ], outputs=[
- gr.File(label="Download"),
- gr.Text(label="Transcription"),
- gr.Text(label="Segments")
- ])
-
- demo.launch(share=share, server_name=server_name, server_port=server_port)
-
- # Clean up
- ui.close()
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument("--input_audio_max_duration", type=int, default=DEFAULT_INPUT_AUDIO_MAX_DURATION, help="Maximum audio file length in seconds, or -1 for no limit.")
- parser.add_argument("--share", type=bool, default=False, help="True to share the app on HuggingFace.")
- parser.add_argument("--server_name", type=str, default=None, help="The host or IP to bind to. If None, bind to localhost.")
- parser.add_argument("--server_port", type=int, default=7860, help="The port to bind to.")
- parser.add_argument("--default_model_name", type=str, default="medium", help="The default model name.")
- parser.add_argument("--default_vad", type=str, default="silero-vad", help="The default VAD.")
- parser.add_argument("--vad_parallel_devices", type=str, default="", help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.")
- parser.add_argument("--vad_process_timeout", type=float, default="1800", help="The number of seconds before inactivate processes are terminated. Use 0 to close processes immediately, or None for no timeout.")
-
- args = parser.parse_args().__dict__
- create_ui(**args)
\ No newline at end of file
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/to_process.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/to_process.py
deleted file mode 100644
index 7ba9d44198233b94bea1b01c6135416170eac925..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/to_process.py
+++ /dev/null
@@ -1,249 +0,0 @@
-from __future__ import annotations
-
-import os
-import pickle
-import subprocess
-import sys
-from collections import deque
-from importlib.util import module_from_spec, spec_from_file_location
-from typing import Callable, TypeVar, cast
-
-from ._core._eventloop import current_time, get_asynclib, get_cancelled_exc_class
-from ._core._exceptions import BrokenWorkerProcess
-from ._core._subprocesses import open_process
-from ._core._synchronization import CapacityLimiter
-from ._core._tasks import CancelScope, fail_after
-from .abc import ByteReceiveStream, ByteSendStream, Process
-from .lowlevel import RunVar, checkpoint_if_cancelled
-from .streams.buffered import BufferedByteReceiveStream
-
-WORKER_MAX_IDLE_TIME = 300 # 5 minutes
-
-T_Retval = TypeVar("T_Retval")
-_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
-_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
- "_process_pool_idle_workers"
-)
-_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
-
-
-async def run_sync(
- func: Callable[..., T_Retval],
- *args: object,
- cancellable: bool = False,
- limiter: CapacityLimiter | None = None,
-) -> T_Retval:
- """
- Call the given function with the given arguments in a worker process.
-
- If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled,
- the worker process running it will be abruptly terminated using SIGKILL (or
- ``terminateProcess()`` on Windows).
-
- :param func: a callable
- :param args: positional arguments for the callable
- :param cancellable: ``True`` to allow cancellation of the operation while it's running
- :param limiter: capacity limiter to use to limit the total amount of processes running
- (if omitted, the default limiter is used)
- :return: an awaitable that yields the return value of the function.
-
- """
-
- async def send_raw_command(pickled_cmd: bytes) -> object:
- try:
- await stdin.send(pickled_cmd)
- response = await buffered.receive_until(b"\n", 50)
- status, length = response.split(b" ")
- if status not in (b"RETURN", b"EXCEPTION"):
- raise RuntimeError(
- f"Worker process returned unexpected response: {response!r}"
- )
-
- pickled_response = await buffered.receive_exactly(int(length))
- except BaseException as exc:
- workers.discard(process)
- try:
- process.kill()
- with CancelScope(shield=True):
- await process.aclose()
- except ProcessLookupError:
- pass
-
- if isinstance(exc, get_cancelled_exc_class()):
- raise
- else:
- raise BrokenWorkerProcess from exc
-
- retval = pickle.loads(pickled_response)
- if status == b"EXCEPTION":
- assert isinstance(retval, BaseException)
- raise retval
- else:
- return retval
-
- # First pickle the request before trying to reserve a worker process
- await checkpoint_if_cancelled()
- request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)
-
- # If this is the first run in this event loop thread, set up the necessary variables
- try:
- workers = _process_pool_workers.get()
- idle_workers = _process_pool_idle_workers.get()
- except LookupError:
- workers = set()
- idle_workers = deque()
- _process_pool_workers.set(workers)
- _process_pool_idle_workers.set(idle_workers)
- get_asynclib().setup_process_pool_exit_at_shutdown(workers)
-
- async with (limiter or current_default_process_limiter()):
- # Pop processes from the pool (starting from the most recently used) until we find one that
- # hasn't exited yet
- process: Process
- while idle_workers:
- process, idle_since = idle_workers.pop()
- if process.returncode is None:
- stdin = cast(ByteSendStream, process.stdin)
- buffered = BufferedByteReceiveStream(
- cast(ByteReceiveStream, process.stdout)
- )
-
- # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME seconds or
- # longer
- now = current_time()
- killed_processes: list[Process] = []
- while idle_workers:
- if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
- break
-
- process, idle_since = idle_workers.popleft()
- process.kill()
- workers.remove(process)
- killed_processes.append(process)
-
- with CancelScope(shield=True):
- for process in killed_processes:
- await process.aclose()
-
- break
-
- workers.remove(process)
- else:
- command = [sys.executable, "-u", "-m", __name__]
- process = await open_process(
- command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
- )
- try:
- stdin = cast(ByteSendStream, process.stdin)
- buffered = BufferedByteReceiveStream(
- cast(ByteReceiveStream, process.stdout)
- )
- with fail_after(20):
- message = await buffered.receive(6)
-
- if message != b"READY\n":
- raise BrokenWorkerProcess(
- f"Worker process returned unexpected response: {message!r}"
- )
-
- main_module_path = getattr(sys.modules["__main__"], "__file__", None)
- pickled = pickle.dumps(
- ("init", sys.path, main_module_path),
- protocol=pickle.HIGHEST_PROTOCOL,
- )
- await send_raw_command(pickled)
- except (BrokenWorkerProcess, get_cancelled_exc_class()):
- raise
- except BaseException as exc:
- process.kill()
- raise BrokenWorkerProcess(
- "Error during worker process initialization"
- ) from exc
-
- workers.add(process)
-
- with CancelScope(shield=not cancellable):
- try:
- return cast(T_Retval, await send_raw_command(request))
- finally:
- if process in workers:
- idle_workers.append((process, current_time()))
-
-
-def current_default_process_limiter() -> CapacityLimiter:
- """
- Return the capacity limiter that is used by default to limit the number of worker processes.
-
- :return: a capacity limiter object
-
- """
- try:
- return _default_process_limiter.get()
- except LookupError:
- limiter = CapacityLimiter(os.cpu_count() or 2)
- _default_process_limiter.set(limiter)
- return limiter
-
-
-def process_worker() -> None:
- # Redirect standard streams to os.devnull so that user code won't interfere with the
- # parent-worker communication
- stdin = sys.stdin
- stdout = sys.stdout
- sys.stdin = open(os.devnull)
- sys.stdout = open(os.devnull, "w")
-
- stdout.buffer.write(b"READY\n")
- while True:
- retval = exception = None
- try:
- command, *args = pickle.load(stdin.buffer)
- except EOFError:
- return
- except BaseException as exc:
- exception = exc
- else:
- if command == "run":
- func, args = args
- try:
- retval = func(*args)
- except BaseException as exc:
- exception = exc
- elif command == "init":
- main_module_path: str | None
- sys.path, main_module_path = args
- del sys.modules["__main__"]
- if main_module_path:
- # Load the parent's main module but as __mp_main__ instead of __main__
- # (like multiprocessing does) to avoid infinite recursion
- try:
- spec = spec_from_file_location("__mp_main__", main_module_path)
- if spec and spec.loader:
- main = module_from_spec(spec)
- spec.loader.exec_module(main)
- sys.modules["__main__"] = main
- except BaseException as exc:
- exception = exc
-
- try:
- if exception is not None:
- status = b"EXCEPTION"
- pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
- else:
- status = b"RETURN"
- pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
- except BaseException as exc:
- exception = exc
- status = b"EXCEPTION"
- pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)
-
- stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
- stdout.buffer.write(pickled)
-
- # Respect SIGTERM
- if isinstance(exception, SystemExit):
- raise exception
-
-
-if __name__ == "__main__":
- process_worker()
diff --git a/spaces/declare-lab/tango/diffusers/examples/text_to_image/train_text_to_image.py b/spaces/declare-lab/tango/diffusers/examples/text_to_image/train_text_to_image.py
deleted file mode 100644
index bf2d1e81912e5c1448c217bf6b4d23c3d8fd7640..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/examples/text_to_image/train_text_to_image.py
+++ /dev/null
@@ -1,781 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-
-import argparse
-import logging
-import math
-import os
-import random
-from pathlib import Path
-
-import accelerate
-import datasets
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-import transformers
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from datasets import load_dataset
-from huggingface_hub import create_repo, upload_folder
-from packaging import version
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-import diffusers
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.optimization import get_scheduler
-from diffusers.training_utils import EMAModel
-from diffusers.utils import check_min_version, deprecate
-from diffusers.utils.import_utils import is_xformers_available
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.15.0.dev0")
-
-logger = get_logger(__name__, log_level="INFO")
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--revision",
- type=str,
- default=None,
- required=False,
- help="Revision of pretrained model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--dataset_name",
- type=str,
- default=None,
- help=(
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
- " or to a folder containing files that 🤗 Datasets can understand."
- ),
- )
- parser.add_argument(
- "--dataset_config_name",
- type=str,
- default=None,
- help="The config of the Dataset, leave as None if there's only one config.",
- )
- parser.add_argument(
- "--train_data_dir",
- type=str,
- default=None,
- help=(
- "A folder containing the training data. Folder contents must follow the structure described in"
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
- ),
- )
- parser.add_argument(
- "--image_column", type=str, default="image", help="The column of the dataset containing an image."
- )
- parser.add_argument(
- "--caption_column",
- type=str,
- default="text",
- help="The column of the dataset containing a caption or a list of captions.",
- )
- parser.add_argument(
- "--max_train_samples",
- type=int,
- default=None,
- help=(
- "For debugging purposes or quicker training, truncate the number of training examples to this "
- "value if set."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="sd-model-finetuned",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument(
- "--cache_dir",
- type=str,
- default=None,
- help="The directory where the downloaded models and datasets will be stored.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop",
- default=False,
- action="store_true",
- help=(
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
- " cropped. The images will be resized to the resolution first before cropping."
- ),
- )
- parser.add_argument(
- "--random_flip",
- action="store_true",
- help="whether to randomly flip images horizontally",
- )
- parser.add_argument(
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument("--num_train_epochs", type=int, default=100)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=1e-4,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument(
- "--allow_tf32",
- action="store_true",
- help=(
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
- ),
- )
- parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
- parser.add_argument(
- "--non_ema_revision",
- type=str,
- default=None,
- required=False,
- help=(
- "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
- " remote repository specified with --pretrained_model_name_or_path."
- ),
- )
- parser.add_argument(
- "--dataloader_num_workers",
- type=int,
- default=0,
- help=(
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
- ),
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default=None,
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
- ),
- )
- parser.add_argument(
- "--report_to",
- type=str,
- default="tensorboard",
- help=(
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
- ),
- )
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
- parser.add_argument(
- "--checkpointing_steps",
- type=int,
- default=500,
- help=(
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
- " training using `--resume_from_checkpoint`."
- ),
- )
- parser.add_argument(
- "--checkpoints_total_limit",
- type=int,
- default=None,
- help=(
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
- " for more docs"
- ),
- )
- parser.add_argument(
- "--resume_from_checkpoint",
- type=str,
- default=None,
- help=(
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
- ),
- )
- parser.add_argument(
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
- )
- parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- # Sanity checks
- if args.dataset_name is None and args.train_data_dir is None:
- raise ValueError("Need either a dataset name or a training folder.")
-
- # default to using the same revision for the non-ema model if not specified
- if args.non_ema_revision is None:
- args.non_ema_revision = args.revision
-
- return args
-
-
-dataset_name_mapping = {
- "lambdalabs/pokemon-blip-captions": ("image", "text"),
-}
-
-
-def main():
- args = parse_args()
-
- if args.non_ema_revision is not None:
- deprecate(
- "non_ema_revision!=None",
- "0.15.0",
- message=(
- "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
- " use `--variant=non_ema` instead."
- ),
- )
- logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
- accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
-
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with=args.report_to,
- logging_dir=logging_dir,
- project_config=accelerator_project_config,
- )
-
- # Make one log on every process with the configuration for debugging.
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- level=logging.INFO,
- )
- logger.info(accelerator.state, main_process_only=False)
- if accelerator.is_local_main_process:
- datasets.utils.logging.set_verbosity_warning()
- transformers.utils.logging.set_verbosity_warning()
- diffusers.utils.logging.set_verbosity_info()
- else:
- datasets.utils.logging.set_verbosity_error()
- transformers.utils.logging.set_verbosity_error()
- diffusers.utils.logging.set_verbosity_error()
-
- # If passed along, set the training seed now.
- if args.seed is not None:
- set_seed(args.seed)
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Load scheduler, tokenizer and models.
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
- tokenizer = CLIPTokenizer.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
- )
- text_encoder = CLIPTextModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
- )
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
- unet = UNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
- )
-
- # Freeze vae and text_encoder
- vae.requires_grad_(False)
- text_encoder.requires_grad_(False)
-
- # Create EMA for the unet.
- if args.use_ema:
- ema_unet = UNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
- )
- ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config)
-
- if args.enable_xformers_memory_efficient_attention:
- if is_xformers_available():
- import xformers
-
- xformers_version = version.parse(xformers.__version__)
- if xformers_version == version.parse("0.0.16"):
- logger.warn(
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
- )
- unet.enable_xformers_memory_efficient_attention()
- else:
- raise ValueError("xformers is not available. Make sure it is installed correctly")
-
- # `accelerate` 0.16.0 will have better support for customized saving
- if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
- # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
- def save_model_hook(models, weights, output_dir):
- if args.use_ema:
- ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
-
- for i, model in enumerate(models):
- model.save_pretrained(os.path.join(output_dir, "unet"))
-
- # make sure to pop weight so that corresponding model is not saved again
- weights.pop()
-
- def load_model_hook(models, input_dir):
- if args.use_ema:
- load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
- ema_unet.load_state_dict(load_model.state_dict())
- ema_unet.to(accelerator.device)
- del load_model
-
- for i in range(len(models)):
- # pop models so that they are not loaded again
- model = models.pop()
-
- # load diffusers style into model
- load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
- model.register_to_config(**load_model.config)
-
- model.load_state_dict(load_model.state_dict())
- del load_model
-
- accelerator.register_save_state_pre_hook(save_model_hook)
- accelerator.register_load_state_pre_hook(load_model_hook)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
-
- # Enable TF32 for faster training on Ampere GPUs,
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
- if args.allow_tf32:
- torch.backends.cuda.matmul.allow_tf32 = True
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Initialize the optimizer
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
- )
-
- optimizer_cls = bnb.optim.AdamW8bit
- else:
- optimizer_cls = torch.optim.AdamW
-
- optimizer = optimizer_cls(
- unet.parameters(),
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- # Get the datasets: you can either provide your own training and evaluation files (see below)
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
-
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
- # download the dataset.
- if args.dataset_name is not None:
- # Downloading and loading a dataset from the hub.
- dataset = load_dataset(
- args.dataset_name,
- args.dataset_config_name,
- cache_dir=args.cache_dir,
- )
- else:
- data_files = {}
- if args.train_data_dir is not None:
- data_files["train"] = os.path.join(args.train_data_dir, "**")
- dataset = load_dataset(
- "imagefolder",
- data_files=data_files,
- cache_dir=args.cache_dir,
- )
- # See more about loading custom images at
- # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
-
- # Preprocessing the datasets.
- # We need to tokenize inputs and targets.
- column_names = dataset["train"].column_names
-
- # 6. Get the column names for input/target.
- dataset_columns = dataset_name_mapping.get(args.dataset_name, None)
- if args.image_column is None:
- image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
- else:
- image_column = args.image_column
- if image_column not in column_names:
- raise ValueError(
- f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
- )
- if args.caption_column is None:
- caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
- else:
- caption_column = args.caption_column
- if caption_column not in column_names:
- raise ValueError(
- f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
- )
-
- # Preprocessing the datasets.
- # We need to tokenize input captions and transform the images.
- def tokenize_captions(examples, is_train=True):
- captions = []
- for caption in examples[caption_column]:
- if isinstance(caption, str):
- captions.append(caption)
- elif isinstance(caption, (list, np.ndarray)):
- # take a random caption if there are multiple
- captions.append(random.choice(caption) if is_train else caption[0])
- else:
- raise ValueError(
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
- )
- inputs = tokenizer(
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
- )
- return inputs.input_ids
-
- # Preprocessing the datasets.
- train_transforms = transforms.Compose(
- [
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
- transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def preprocess_train(examples):
- images = [image.convert("RGB") for image in examples[image_column]]
- examples["pixel_values"] = [train_transforms(image) for image in images]
- examples["input_ids"] = tokenize_captions(examples)
- return examples
-
- with accelerator.main_process_first():
- if args.max_train_samples is not None:
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
- # Set the training transforms
- train_dataset = dataset["train"].with_transform(preprocess_train)
-
- def collate_fn(examples):
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
- input_ids = torch.stack([example["input_ids"] for example in examples])
- return {"pixel_values": pixel_values, "input_ids": input_ids}
-
- # DataLoaders creation:
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset,
- shuffle=True,
- collate_fn=collate_fn,
- batch_size=args.train_batch_size,
- num_workers=args.dataloader_num_workers,
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- # Prepare everything with our `accelerator`.
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- if args.use_ema:
- ema_unet.to(accelerator.device)
-
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- weight_dtype = torch.float32
- if accelerator.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif accelerator.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move text_encode and vae to gpu and cast to weight_dtype
- text_encoder.to(accelerator.device, dtype=weight_dtype)
- vae.to(accelerator.device, dtype=weight_dtype)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initializes automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("text2image-fine-tune", config=vars(args))
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- global_step = 0
- first_epoch = 0
-
- # Potentially load in the weights and states from a previous save
- if args.resume_from_checkpoint:
- if args.resume_from_checkpoint != "latest":
- path = os.path.basename(args.resume_from_checkpoint)
- else:
- # Get the most recent checkpoint
- dirs = os.listdir(args.output_dir)
- dirs = [d for d in dirs if d.startswith("checkpoint")]
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
- path = dirs[-1] if len(dirs) > 0 else None
-
- if path is None:
- accelerator.print(
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
- )
- args.resume_from_checkpoint = None
- else:
- accelerator.print(f"Resuming from checkpoint {path}")
- accelerator.load_state(os.path.join(args.output_dir, path))
- global_step = int(path.split("-")[1])
-
- resume_global_step = global_step * args.gradient_accumulation_steps
- first_epoch = global_step // num_update_steps_per_epoch
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
- progress_bar.set_description("Steps")
-
- for epoch in range(first_epoch, args.num_train_epochs):
- unet.train()
- train_loss = 0.0
- for step, batch in enumerate(train_dataloader):
- # Skip steps until we reach the resumed step
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
- if step % args.gradient_accumulation_steps == 0:
- progress_bar.update(1)
- continue
-
- with accelerator.accumulate(unet):
- # Convert images to latent space
- latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample()
- latents = latents * vae.config.scaling_factor
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- if args.noise_offset:
- # https://www.crosslabs.org//blog/diffusion-with-offset-noise
- noise += args.noise_offset * torch.randn(
- (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
- )
-
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- # Predict the noise residual and compute loss
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- # Gather the losses across all processes for logging (if we use distributed training).
- avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
- train_loss += avg_loss.item() / args.gradient_accumulation_steps
-
- # Backpropagate
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- if args.use_ema:
- ema_unet.step(unet.parameters())
- progress_bar.update(1)
- global_step += 1
- accelerator.log({"train_loss": train_loss}, step=global_step)
- train_loss = 0.0
-
- if global_step % args.checkpointing_steps == 0:
- if accelerator.is_main_process:
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
- accelerator.save_state(save_path)
- logger.info(f"Saved state to {save_path}")
-
- logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
-
- if global_step >= args.max_train_steps:
- break
-
- # Create the pipeline using the trained modules and save it.
- accelerator.wait_for_everyone()
- if accelerator.is_main_process:
- unet = accelerator.unwrap_model(unet)
- if args.use_ema:
- ema_unet.copy_to(unet.parameters())
-
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- text_encoder=text_encoder,
- vae=vae,
- unet=unet,
- revision=args.revision,
- )
- pipeline.save_pretrained(args.output_dir)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/declare-lab/tango/diffusers/scripts/__init__.py b/spaces/declare-lab/tango/diffusers/scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/decodemai/chatgpt_prompts/README.md b/spaces/decodemai/chatgpt_prompts/README.md
deleted file mode 100644
index 91e5b08e2727302f737fbbe5af4dcc345a1b9cec..0000000000000000000000000000000000000000
--- a/spaces/decodemai/chatgpt_prompts/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Chatgpt Prompts
-emoji: 💻
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: cc-by-nc-nd-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/derek-thomas/QADemo/utilities/FAQ.py b/spaces/derek-thomas/QADemo/utilities/FAQ.py
deleted file mode 100644
index 14513aa84f5e50f2a49fad44b26288dfde79fe98..0000000000000000000000000000000000000000
--- a/spaces/derek-thomas/QADemo/utilities/FAQ.py
+++ /dev/null
@@ -1,47 +0,0 @@
-FAQ = """
-# Motivation
-I wanted to create a space where I can compare different methods of QA on a moderately large corpus.
-
-Retrievers:
-1. Sparse: BM25
-2. Dense: [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2)
-
-Readers:
-1. Extractive: [deepset/deberta-v3-base-squad2](https://huggingface.co/deepset/deberta-v3-base-squad2)
-1. Generative: [vblagoje/bart_lfqa](https://huggingface.co/vblagoje/bart_lfqa)
-
-# Observations
-1. Haystack makes this process REALLY easy
-2. Processing didnt take that long for ~300k -> 30 min using colab pro
-3. There are lots of options to build on this, Im really interested in using something like FLAN-T5 on the output
-
-# Questions
-#### Why am I getting bad answers for the 3rd example?
-> What is the tallest mountain on Earth?
-
-The Dense retriever is not doing a good job of finding the right article, so the reader has no chance!
-Increase `Retriever Top_k` to 50 and you will see that you get the right answer on both. This is a good way of
-showing that the retriever and reader both need to work well to get good results.
-
-#### How did you train this?
-I used off the shelf transformers and didn't fine-tune. These are usually trained on wikipedia so we can consider
-the data in-domain.
-
-For more information see this [notebook](https://huggingface.co/spaces/derek-thomas/QADemo/blob/main/notebooks/colab_haystack.ipynb).
-
-#### What data did you use?
-169597 [Simple Wikipedia articles](http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz) processed as shown in the
-[notebook](https://huggingface.co/spaces/derek-thomas/QADemo/blob/main/notebooks/colab_haystack.ipynb).
-Downloaded from [here](http://sbert.net/datasets/)
-
-#### Were there any unexpected challenges?
-I used colab to process the dense embeddings. It took me a bit to realize that haystack stores the embeddings in numpy 🙏🏾.
-But the retriever was saved with cuda settings. You only have to change the settings, not the embeddings.
-
-#### What is `no_ans_gap`?
-Check it out [here](https://github.com/deepset-ai/haystack/issues/897).
-
-# What is next?
-- ~~[LFQA](https://www.pinecone.io/learn/haystack-lfqa/) [LFQA2](https://haystack.deepset.ai/tutorials/12_lfqa)~~
-- Add progress bars
-"""
diff --git a/spaces/diacanFperku/AutoGPT/Circuit Wizard 3 Free Download Full 92 [PORTABLE].md b/spaces/diacanFperku/AutoGPT/Circuit Wizard 3 Free Download Full 92 [PORTABLE].md
deleted file mode 100644
index 12c372317777b7a28c1d7593f53d69aba1e494c3..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Circuit Wizard 3 Free Download Full 92 [PORTABLE].md
+++ /dev/null
@@ -1,6 +0,0 @@
-Circuit Wizard 3 Free Download Full 92
DOWNLOAD >>> https://gohhs.com/2uFSVZ
-
-I try to connect my ICD2 with USB , target powered ... I have desinstalled all MPLAB IDE V8.92 from my PC and download from ... Only Release 3 and Release 4 (R3/R4, see on the back side of ICD2) ... On another machine I had to use the wizard to setup the ICD2 properly before it connected to my PIC. 1fdad05405
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Electrician Simulator Full Extra Quality Crack [Torrent].md b/spaces/diacanFperku/AutoGPT/Electrician Simulator Full Extra Quality Crack [Torrent].md
deleted file mode 100644
index be11ce872b13de8b98f0582ad7bff72cc0091d19..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Electrician Simulator Full Extra Quality Crack [Torrent].md
+++ /dev/null
@@ -1,6 +0,0 @@
-Electrician Simulator Full Crack [Torrent]
Download →→→ https://gohhs.com/2uFUga
-
- 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (Maheruh 1 Hd Movie Download).md b/spaces/diacanFperku/AutoGPT/HD Online Player (Maheruh 1 Hd Movie Download).md
deleted file mode 100644
index c3a7734565066397f5d3517469824ed9b45f733d..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/HD Online Player (Maheruh 1 Hd Movie Download).md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-How to Watch Maheruh Online in HD Quality
-Maheruh is a 2017 Hindi romance movie directed by Vithal Sunita Veturkar and starring Amit Dolawat, Dilip Karad and Drisha More. The movie tells the story of Vikram Oberoi, a wealthy businessman who falls in love with a simple girl named Jahaan. However, their relationship faces many challenges due to their different backgrounds and family issues.
-HD Online Player (Maheruh 1 Hd Movie Download)
Download Zip ✏ https://gohhs.com/2uFTp0
-If you want to watch Maheruh online in HD quality, you have several options. One of them is to use an HD online player that can stream or download the movie from various sources. Here are some of the best HD online players that you can use:
-
-- MX Player: This is a popular video player app that supports various formats and codecs. You can also use it to stream or download movies from various websites. To watch Maheruh online using MX Player, you need to visit the mobile app store and download the app. Then, you need to search for Maheruh on the app and select a source that offers HD quality. You can then choose to stream or download the movie.
-- Zappiti: This is a media center software that can organize and play your movies, TV shows, music and photos. You can also use it to stream or download movies from various online sources. To watch Maheruh online using Zappiti, you need to visit the official website and download the software. Then, you need to install it on your device and launch it. You can then search for Maheruh on the software and select a source that offers HD quality. You can then choose to stream or download the movie.
-- VLC Media Player: This is a versatile video player that can play almost any format and codec. You can also use it to stream or download movies from various websites. To watch Maheruh online using VLC Media Player, you need to visit the official website and download the software. Then, you need to install it on your device and launch it. You can then go to the Media menu and select Open Network Stream. You can then enter the URL of a website that offers Maheruh in HD quality and click Play.
-
-These are some of the best HD online players that you can use to watch Maheruh online in HD quality. However, you should be aware of the legal and ethical issues involved in streaming or downloading movies from unauthorized sources. You should always respect the rights of the creators and distributors of the movies and only use legal and authorized sources.
-
-If you are looking for some other ways to watch Maheruh online in HD quality, you can also try some of the following options:
-
-- Amazon Prime Video: This is a subscription-based streaming service that offers a wide range of movies and TV shows. You can also download some of the content for offline viewing. To watch Maheruh online using Amazon Prime Video, you need to visit the official website and sign up for a membership. Then, you need to search for Maheruh on the website and click Watch Now. You can then stream the movie in HD quality.
-- YouTube: This is a free video-sharing platform that hosts millions of videos. You can also rent or buy some of the movies and TV shows on the platform. To watch Maheruh online using YouTube, you need to visit the official website and search for Maheruh. You can then select a video that offers the movie in HD quality. You can then click Rent or Buy and pay a fee to stream or download the movie.
-- Netflix: This is another subscription-based streaming service that offers a large collection of movies and TV shows. You can also download some of the content for offline viewing. To watch Maheruh online using Netflix, you need to visit the official website and sign up for a plan. Then, you need to search for Maheruh on the website and click Play. You can then stream the movie in HD quality.
-
-These are some other ways to watch Maheruh online in HD quality. However, you should always check the availability and legality of the movies and TV shows on these platforms before watching them. You should also respect the ratings and parental guidance of the content and watch them responsibly.
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Hap.Hazard.v2.0-SLAM Version Download TOP.md b/spaces/diacanFperku/AutoGPT/Hap.Hazard.v2.0-SLAM Version Download TOP.md
deleted file mode 100644
index 9e404793cccea3bd5cbb8b116914a7634d38f9a1..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Hap.Hazard.v2.0-SLAM Version Download TOP.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Hap.Hazard.v2.0-SLAM Version Download
Download https://gohhs.com/2uFTMi
-
-20. Jielin Yu. WR 100 Essays. Two Sides to Every Story. 32. Micaela Bedell. Darkness ... ing to me because I have to deal with issues such as illegally downloading music, ... the arguments of how the DMCA has worked correctly to protect free speech. ... Rather Dante fills us in on the rest of the story, showing us what hap-. 1fdad05405
-
-
-
diff --git a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/monotonic_align/core.c b/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/monotonic_align/core.c
deleted file mode 100644
index 5f8af54d32474f821e9d1f4d2679d78128722596..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/monotonic_align/core.c
+++ /dev/null
@@ -1,26530 +0,0 @@
-/* Generated by Cython 3.0.0 */
-
-/* BEGIN: Cython Metadata
-{
- "distutils": {
- "name": "monotonic_align.core",
- "sources": [
- "core.pyx"
- ]
- },
- "module_name": "monotonic_align.core"
-}
-END: Cython Metadata */
-
-#ifndef PY_SSIZE_T_CLEAN
-#define PY_SSIZE_T_CLEAN
-#endif /* PY_SSIZE_T_CLEAN */
-#if defined(CYTHON_LIMITED_API) && 0
- #ifndef Py_LIMITED_API
- #if CYTHON_LIMITED_API+0 > 0x03030000
- #define Py_LIMITED_API CYTHON_LIMITED_API
- #else
- #define Py_LIMITED_API 0x03030000
- #endif
- #endif
-#endif
-
-#include "Python.h"
-#ifndef Py_PYTHON_H
- #error Python headers needed to compile C extensions, please install development version of Python.
-#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
- #error Cython requires Python 2.7+ or Python 3.3+.
-#else
-#define CYTHON_ABI "3_0_0"
-#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI
-#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."
-#define CYTHON_HEX_VERSION 0x030000F0
-#define CYTHON_FUTURE_DIVISION 1
-#include
-#ifndef offsetof
- #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
-#endif
-#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
- #ifndef __stdcall
- #define __stdcall
- #endif
- #ifndef __cdecl
- #define __cdecl
- #endif
- #ifndef __fastcall
- #define __fastcall
- #endif
-#endif
-#ifndef DL_IMPORT
- #define DL_IMPORT(t) t
-#endif
-#ifndef DL_EXPORT
- #define DL_EXPORT(t) t
-#endif
-#define __PYX_COMMA ,
-#ifndef HAVE_LONG_LONG
- #define HAVE_LONG_LONG
-#endif
-#ifndef PY_LONG_LONG
- #define PY_LONG_LONG LONG_LONG
-#endif
-#ifndef Py_HUGE_VAL
- #define Py_HUGE_VAL HUGE_VAL
-#endif
-#if defined(GRAALVM_PYTHON)
- /* For very preliminary testing purposes. Most variables are set the same as PyPy.
- The existence of this section does not imply that anything works or is even tested */
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 1
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #undef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 1
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3)
- #endif
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(PYPY_VERSION)
- #define CYTHON_COMPILING_IN_PYPY 1
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #undef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 1
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3)
- #endif
- #if PY_VERSION_HEX < 0x03090000
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00)
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(CYTHON_LIMITED_API)
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 1
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #undef CYTHON_CLINE_IN_TRACEBACK
- #define CYTHON_CLINE_IN_TRACEBACK 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 1
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #ifndef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #endif
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL 0
- #undef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS 1
- #endif
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #undef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 1
- #ifndef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
- #endif
-#elif defined(PY_NOGIL)
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 1
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #ifndef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #ifndef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
-#else
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_CPYTHON 1
- #define CYTHON_COMPILING_IN_LIMITED_API 0
- #define CYTHON_COMPILING_IN_GRAAL 0
- #define CYTHON_COMPILING_IN_NOGIL 0
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #ifndef CYTHON_USE_TYPE_SPECS
- #define CYTHON_USE_TYPE_SPECS 0
- #endif
- #ifndef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 1
- #endif
- #if PY_MAJOR_VERSION < 3
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #ifndef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #elif !defined(CYTHON_USE_UNICODE_WRITER)
- #define CYTHON_USE_UNICODE_WRITER 1
- #endif
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #ifndef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 1
- #endif
- #ifndef CYTHON_FAST_GIL
- #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6)
- #endif
- #ifndef CYTHON_METH_FASTCALL
- #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1)
- #endif
- #ifndef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 1
- #endif
- #ifndef CYTHON_PEP487_INIT_SUBCLASS
- #define CYTHON_PEP487_INIT_SUBCLASS 1
- #endif
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
- #define CYTHON_PEP489_MULTI_PHASE_INIT 1
- #endif
- #ifndef CYTHON_USE_MODULE_STATE
- #define CYTHON_USE_MODULE_STATE 0
- #endif
- #if PY_VERSION_HEX < 0x030400a1
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #elif !defined(CYTHON_USE_TP_FINALIZE)
- #define CYTHON_USE_TP_FINALIZE 1
- #endif
- #if PY_VERSION_HEX < 0x030600B1
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #elif !defined(CYTHON_USE_DICT_VERSIONS)
- #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5)
- #endif
- #if PY_VERSION_HEX < 0x030700A3
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
- #elif !defined(CYTHON_USE_EXC_INFO_STACK)
- #define CYTHON_USE_EXC_INFO_STACK 1
- #endif
- #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
- #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
- #endif
-#endif
-#if !defined(CYTHON_FAST_PYCCALL)
-#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
-#endif
-#if !defined(CYTHON_VECTORCALL)
-#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1)
-#endif
-#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1)
-#if CYTHON_USE_PYLONG_INTERNALS
- #if PY_MAJOR_VERSION < 3
- #include "longintrepr.h"
- #endif
- #undef SHIFT
- #undef BASE
- #undef MASK
- #ifdef SIZEOF_VOID_P
- enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
- #endif
-#endif
-#ifndef __has_attribute
- #define __has_attribute(x) 0
-#endif
-#ifndef __has_cpp_attribute
- #define __has_cpp_attribute(x) 0
-#endif
-#ifndef CYTHON_RESTRICT
- #if defined(__GNUC__)
- #define CYTHON_RESTRICT __restrict__
- #elif defined(_MSC_VER) && _MSC_VER >= 1400
- #define CYTHON_RESTRICT __restrict
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_RESTRICT restrict
- #else
- #define CYTHON_RESTRICT
- #endif
-#endif
-#ifndef CYTHON_UNUSED
- #if defined(__cplusplus)
- /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
- * but leads to warnings with -pedantic, since it is a C++17 feature */
- #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
- #if __has_cpp_attribute(maybe_unused)
- #define CYTHON_UNUSED [[maybe_unused]]
- #endif
- #endif
- #endif
-#endif
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-#endif
-#ifndef CYTHON_UNUSED_VAR
-# if defined(__cplusplus)
- template void CYTHON_UNUSED_VAR( const T& ) { }
-# else
-# define CYTHON_UNUSED_VAR(x) (void)(x)
-# endif
-#endif
-#ifndef CYTHON_MAYBE_UNUSED_VAR
- #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-# define CYTHON_NCP_UNUSED
-# else
-# define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
-#ifdef _MSC_VER
- #ifndef _MSC_STDINT_H_
- #if _MSC_VER < 1300
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
- #else
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int16 uint16_t;
- typedef unsigned __int32 uint32_t;
- #endif
- #endif
- #if _MSC_VER < 1300
- #ifdef _WIN64
- typedef unsigned long long __pyx_uintptr_t;
- #else
- typedef unsigned int __pyx_uintptr_t;
- #endif
- #else
- #ifdef _WIN64
- typedef unsigned __int64 __pyx_uintptr_t;
- #else
- typedef unsigned __int32 __pyx_uintptr_t;
- #endif
- #endif
-#else
- #include
- typedef uintptr_t __pyx_uintptr_t;
-#endif
-#ifndef CYTHON_FALLTHROUGH
- #if defined(__cplusplus)
- /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
- * but leads to warnings with -pedantic, since it is a C++17 feature */
- #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
- #if __has_cpp_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH [[fallthrough]]
- #endif
- #endif
- #ifndef CYTHON_FALLTHROUGH
- #if __has_cpp_attribute(clang::fallthrough)
- #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
- #elif __has_cpp_attribute(gnu::fallthrough)
- #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
- #endif
- #endif
- #endif
- #ifndef CYTHON_FALLTHROUGH
- #if __has_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
- #if defined(__clang__) && defined(__apple_build_version__)
- #if __apple_build_version__ < 7000000
- #undef CYTHON_FALLTHROUGH
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
-#endif
-#ifdef __cplusplus
- template
- struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
- #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL::value)
-#else
- #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
-#endif
-#if CYTHON_COMPILING_IN_PYPY == 1
- #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000)
-#else
- #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000)
-#endif
-#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))
-
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #elif defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
-#endif
-
-#define __PYX_BUILD_PY_SSIZE_T "n"
-#define CYTHON_FORMAT_SSIZE_T "z"
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
- #define __Pyx_DefaultClassType PyClass_Type
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
- #define __Pyx_BUILTIN_MODULE_NAME "builtins"
- #define __Pyx_DefaultClassType PyType_Type
-#if PY_VERSION_HEX >= 0x030B00A1
- static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f,
- PyObject *code, PyObject *c, PyObject* n, PyObject *v,
- PyObject *fv, PyObject *cell, PyObject* fn,
- PyObject *name, int fline, PyObject *lnos) {
- PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
- PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *empty=NULL;
- const char *fn_cstr=NULL;
- const char *name_cstr=NULL;
- PyCodeObject *co=NULL, *result=NULL;
- PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
- if (!(kwds=PyDict_New())) goto end;
- if (!(argcount=PyLong_FromLong(a))) goto end;
- if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
- if (!(posonlyargcount=PyLong_FromLong(p))) goto end;
- if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
- if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
- if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
- if (!(nlocals=PyLong_FromLong(l))) goto end;
- if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
- if (!(stacksize=PyLong_FromLong(s))) goto end;
- if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
- if (!(flags=PyLong_FromLong(f))) goto end;
- if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
- if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
- if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
- if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
- if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
- if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto end;
- if (!(empty = PyTuple_New(0))) goto end;
- result = (PyCodeObject*) PyObject_Call(replace, empty, kwds);
- end:
- Py_XDECREF((PyObject*) co);
- Py_XDECREF(kwds);
- Py_XDECREF(argcount);
- Py_XDECREF(posonlyargcount);
- Py_XDECREF(kwonlyargcount);
- Py_XDECREF(nlocals);
- Py_XDECREF(stacksize);
- Py_XDECREF(replace);
- Py_XDECREF(empty);
- if (type) {
- PyErr_Restore(type, value, traceback);
- }
- return result;
- }
-#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
- #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#endif
-#endif
-#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE)
- #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type)
-#else
- #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type))
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is)
- #define __Pyx_Py_Is(x, y) Py_Is(x, y)
-#else
- #define __Pyx_Py_Is(x, y) ((x) == (y))
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone)
- #define __Pyx_Py_IsNone(ob) Py_IsNone(ob)
-#else
- #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None)
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue)
- #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob)
-#else
- #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True)
-#endif
-#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse)
- #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob)
-#else
- #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False)
-#endif
-#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? NULL : (obj))
-#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o)
-#else
- #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o)
-#endif
-#ifndef CO_COROUTINE
- #define CO_COROUTINE 0x80
-#endif
-#ifndef CO_ASYNC_GENERATOR
- #define CO_ASYNC_GENERATOR 0x200
-#endif
-#ifndef Py_TPFLAGS_CHECKTYPES
- #define Py_TPFLAGS_CHECKTYPES 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_INDEX
- #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
- #define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_FINALIZE
- #define Py_TPFLAGS_HAVE_FINALIZE 0
-#endif
-#ifndef Py_TPFLAGS_SEQUENCE
- #define Py_TPFLAGS_SEQUENCE 0
-#endif
-#ifndef Py_TPFLAGS_MAPPING
- #define Py_TPFLAGS_MAPPING 0
-#endif
-#ifndef METH_STACKLESS
- #define METH_STACKLESS 0
-#endif
-#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
- #ifndef METH_FASTCALL
- #define METH_FASTCALL 0x80
- #endif
- typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
- typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
- Py_ssize_t nargs, PyObject *kwnames);
-#else
- #define __Pyx_PyCFunctionFast _PyCFunctionFast
- #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
-#endif
-#if CYTHON_METH_FASTCALL
- #define __Pyx_METH_FASTCALL METH_FASTCALL
- #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast
- #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords
-#else
- #define __Pyx_METH_FASTCALL METH_VARARGS
- #define __Pyx_PyCFunction_FastCall PyCFunction
- #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords
-#endif
-#if CYTHON_VECTORCALL
- #define __pyx_vectorcallfunc vectorcallfunc
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET
- #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n))
-#elif CYTHON_BACKPORT_VECTORCALL
- typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args,
- size_t nargsf, PyObject *kwnames);
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1))
- #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET))
-#else
- #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0
- #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n))
-#endif
-#if PY_VERSION_HEX < 0x030900B1
- #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b))
- typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
-#else
- #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b)
- #define __Pyx_PyCMethod PyCMethod
-#endif
-#ifndef METH_METHOD
- #define METH_METHOD 0x200
-#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
- #define PyObject_Malloc(s) PyMem_Malloc(s)
- #define PyObject_Free(p) PyMem_Free(p)
- #define PyObject_Realloc(p) PyMem_Realloc(p)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
-#else
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyThreadState_Current PyThreadState_Get()
-#elif !CYTHON_FAST_THREAD_STATE
- #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#elif PY_VERSION_HEX >= 0x03060000
- #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
-#elif PY_VERSION_HEX >= 0x03000000
- #define __Pyx_PyThreadState_Current PyThreadState_GET()
-#else
- #define __Pyx_PyThreadState_Current _PyThreadState_Current
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
-static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op)
-{
- void *result;
- result = PyModule_GetState(op);
- if (!result)
- Py_FatalError("Couldn't find the module state");
- return result;
-}
-#endif
-#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype)
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name))
-#else
- #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name)
-#endif
-#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
-#include "pythread.h"
-#define Py_tss_NEEDS_INIT 0
-typedef int Py_tss_t;
-static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
- *key = PyThread_create_key();
- return 0;
-}
-static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
- Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
- *key = Py_tss_NEEDS_INIT;
- return key;
-}
-static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
- PyObject_Free(key);
-}
-static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
- return *key != Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
- PyThread_delete_key(*key);
- *key = Py_tss_NEEDS_INIT;
-}
-static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
- return PyThread_set_key_value(*key, value);
-}
-static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
- return PyThread_get_key_value(*key);
-}
-#endif
-#if PY_MAJOR_VERSION < 3
- #if CYTHON_COMPILING_IN_PYPY
- #if PYPY_VERSION_NUM < 0x07030600
- #if defined(__cplusplus) && __cplusplus >= 201402L
- [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]]
- #elif defined(__GNUC__) || defined(__clang__)
- __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")))
- #elif defined(_MSC_VER)
- __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))
- #endif
- static CYTHON_INLINE int PyGILState_Check(void) {
- return 0;
- }
- #else // PYPY_VERSION_NUM < 0x07030600
- #endif // PYPY_VERSION_NUM < 0x07030600
- #else
- static CYTHON_INLINE int PyGILState_Check(void) {
- PyThreadState * tstate = _PyThreadState_Current;
- return tstate && (tstate == PyGILState_GetThisThreadState());
- }
- #endif
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
-#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
-#else
-#define __Pyx_PyDict_NewPresized(n) PyDict_New()
-#endif
-#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
-#else
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS
-#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
-static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) {
- PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name);
- if (res == NULL) PyErr_Clear();
- return res;
-}
-#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000)
-#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError
-#define __Pyx_PyDict_GetItemStr PyDict_GetItem
-#else
-static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) {
-#if CYTHON_COMPILING_IN_PYPY
- return PyDict_GetItem(dict, name);
-#else
- PyDictEntry *ep;
- PyDictObject *mp = (PyDictObject*) dict;
- long hash = ((PyStringObject *) name)->ob_shash;
- assert(hash != -1);
- ep = (mp->ma_lookup)(mp, name, hash);
- if (ep == NULL) {
- return NULL;
- }
- return ep->me_value;
-#endif
-}
-#define __Pyx_PyDict_GetItemStr PyDict_GetItem
-#endif
-#if CYTHON_USE_TYPE_SLOTS
- #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags)
- #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0)
- #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext)
-#else
- #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp))
- #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature)
- #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next
-#endif
-#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000
-#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\
- PyTypeObject *type = Py_TYPE(obj);\
- assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\
- PyObject_GC_Del(obj);\
- Py_DECREF(type);\
-}
-#else
-#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj)
-#endif
-#if CYTHON_COMPILING_IN_LIMITED_API
- #define CYTHON_PEP393_ENABLED 1
- #define __Pyx_PyUnicode_READY(op) (0)
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U)
- #define __Pyx_PyUnicode_KIND(u) ((void)u, (0))
- #define __Pyx_PyUnicode_DATA(u) ((void*)u)
- #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i))
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u))
-#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
- #define CYTHON_PEP393_ENABLED 1
- #if PY_VERSION_HEX >= 0x030C0000
- #define __Pyx_PyUnicode_READY(op) (0)
- #else
- #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
- 0 : _PyUnicode_Ready((PyObject *)(op)))
- #endif
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
- #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u))
- #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
- #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
- #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch)
- #if PY_VERSION_HEX >= 0x030C0000
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
- #else
- #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
- #else
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
- #endif
- #endif
-#else
- #define CYTHON_PEP393_ENABLED 0
- #define PyUnicode_1BYTE_KIND 1
- #define PyUnicode_2BYTE_KIND 2
- #define PyUnicode_4BYTE_KIND 4
- #define __Pyx_PyUnicode_READY(op) (0)
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
- #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
- #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535U : 1114111U)
- #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE))
- #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
- #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
- #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch)
- #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
-#endif
-#if CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
- #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
-#else
- #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
- #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
- PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
-#endif
-#if CYTHON_COMPILING_IN_PYPY
- #if !defined(PyUnicode_DecodeUnicodeEscape)
- #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors)
- #endif
- #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500)
- #undef PyUnicode_Contains
- #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
- #endif
- #if !defined(PyByteArray_Check)
- #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
- #endif
- #if !defined(PyObject_Format)
- #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
- #endif
-#endif
-#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
-#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
-#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
-#else
- #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
-#endif
-#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
- #define PyObject_ASCII(o) PyObject_Repr(o)
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyBaseString_Type PyUnicode_Type
- #define PyStringObject PyUnicodeObject
- #define PyString_Type PyUnicode_Type
- #define PyString_Check PyUnicode_Check
- #define PyString_CheckExact PyUnicode_CheckExact
-#ifndef PyObject_Unicode
- #define PyObject_Unicode PyObject_Str
-#endif
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
- #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
-#else
- #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
- #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
- #define __Pyx_PySequence_ListKeepNew(obj)\
- (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj))
-#else
- #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj)
-#endif
-#ifndef PySet_CheckExact
- #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type)
-#endif
-#if PY_VERSION_HEX >= 0x030900A4
- #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
- #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
-#else
- #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
- #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
-#endif
-#if CYTHON_ASSUME_SAFE_MACROS
- #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
-#else
- #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyIntObject PyLongObject
- #define PyInt_Type PyLong_Type
- #define PyInt_Check(op) PyLong_Check(op)
- #define PyInt_CheckExact(op) PyLong_CheckExact(op)
- #define __Pyx_Py3Int_Check(op) PyLong_Check(op)
- #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op)
- #define PyInt_FromString PyLong_FromString
- #define PyInt_FromUnicode PyLong_FromUnicode
- #define PyInt_FromLong PyLong_FromLong
- #define PyInt_FromSize_t PyLong_FromSize_t
- #define PyInt_FromSsize_t PyLong_FromSsize_t
- #define PyInt_AsLong PyLong_AsLong
- #define PyInt_AS_LONG PyLong_AS_LONG
- #define PyInt_AsSsize_t PyLong_AsSsize_t
- #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
- #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
- #define PyNumber_Int PyNumber_Long
-#else
- #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op))
- #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op))
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyBoolObject PyLongObject
-#endif
-#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
- #ifndef PyUnicode_InternFromString
- #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
- #endif
-#endif
-#if PY_VERSION_HEX < 0x030200A4
- typedef long Py_hash_t;
- #define __Pyx_PyInt_FromHash_t PyInt_FromLong
- #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
-#else
- #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
- #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
-#endif
-#if CYTHON_USE_ASYNC_SLOTS
- #if PY_VERSION_HEX >= 0x030500B1
- #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
- #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
- #else
- #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
- #endif
-#else
- #define __Pyx_PyType_AsAsync(obj) NULL
-#endif
-#ifndef __Pyx_PyAsyncMethodsStruct
- typedef struct {
- unaryfunc am_await;
- unaryfunc am_aiter;
- unaryfunc am_anext;
- } __Pyx_PyAsyncMethodsStruct;
-#endif
-
-#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
- #if !defined(_USE_MATH_DEFINES)
- #define _USE_MATH_DEFINES
- #endif
-#endif
-#include
-#ifdef NAN
-#define __PYX_NAN() ((float) NAN)
-#else
-static CYTHON_INLINE float __PYX_NAN() {
- float value;
- memset(&value, 0xFF, sizeof(value));
- return value;
-}
-#endif
-#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
-#define __Pyx_truncl trunc
-#else
-#define __Pyx_truncl truncl
-#endif
-
-#define __PYX_MARK_ERR_POS(f_index, lineno) \
- { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
-#define __PYX_ERR(f_index, lineno, Ln_error) \
- { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
-
-#ifdef CYTHON_EXTERN_C
- #undef __PYX_EXTERN_C
- #define __PYX_EXTERN_C CYTHON_EXTERN_C
-#elif defined(__PYX_EXTERN_C)
- #ifdef _MSC_VER
- #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
- #else
- #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
- #endif
-#else
- #ifdef __cplusplus
- #define __PYX_EXTERN_C extern "C"
- #else
- #define __PYX_EXTERN_C extern
- #endif
-#endif
-
-#define __PYX_HAVE__monotonic_align__core
-#define __PYX_HAVE_API__monotonic_align__core
-/* Early includes */
-#include "pythread.h"
-#include
-#include
-#ifdef _OPENMP
-#include
-#endif /* _OPENMP */
-
-#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
-#define CYTHON_WITHOUT_ASSERTIONS
-#endif
-
-typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
- const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
-
-#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
-#define __PYX_DEFAULT_STRING_ENCODING ""
-#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
-#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_uchar_cast(c) ((unsigned char)c)
-#define __Pyx_long_cast(x) ((long)x)
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
- (sizeof(type) < sizeof(Py_ssize_t)) ||\
- (sizeof(type) > sizeof(Py_ssize_t) &&\
- likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX) &&\
- (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
- v == (type)PY_SSIZE_T_MIN))) ||\
- (sizeof(type) == sizeof(Py_ssize_t) &&\
- (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX))) )
-static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
- return (size_t) i < (size_t) limit;
-}
-#if defined (__cplusplus) && __cplusplus >= 201103L
- #include
- #define __Pyx_sst_abs(value) std::abs(value)
-#elif SIZEOF_INT >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) abs(value)
-#elif SIZEOF_LONG >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) labs(value)
-#elif defined (_MSC_VER)
- #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
-#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define __Pyx_sst_abs(value) llabs(value)
-#elif defined (__GNUC__)
- #define __Pyx_sst_abs(value) __builtin_llabs(value)
-#else
- #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
-#endif
-static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
-static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
-#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
-#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
-#define __Pyx_PyBytes_FromString PyBytes_FromString
-#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
- #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#else
- #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
- #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
-#endif
-#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
-#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
-#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
-#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
-#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
-#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
-#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
-#if CYTHON_COMPILING_IN_LIMITED_API
-static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u)
-{
- const wchar_t *u_end = u;
- while (*u_end++) ;
- return (size_t)(u_end - u - 1);
-}
-#else
-static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
-{
- const Py_UNICODE *u_end = u;
- while (*u_end++) ;
- return (size_t)(u_end - u - 1);
-}
-#endif
-#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o)
-#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
-#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
-#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
-#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
-#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
-static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
-static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
-static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
-#define __Pyx_PySequence_Tuple(obj)\
- (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
-static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
-static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
-static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
-#if CYTHON_ASSUME_SAFE_MACROS
-#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
-#else
-#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
-#endif
-#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
-#else
-#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
-#endif
-#if CYTHON_USE_PYLONG_INTERNALS
- #if PY_VERSION_HEX >= 0x030C00A7
- #ifndef _PyLong_SIGN_MASK
- #define _PyLong_SIGN_MASK 3
- #endif
- #ifndef _PyLong_NON_SIZE_BITS
- #define _PyLong_NON_SIZE_BITS 3
- #endif
- #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK)
- #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0)
- #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x))
- #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1)
- #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0)
- #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0])
- #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS))
- #define __Pyx_PyLong_SignedDigitCount(x)\
- ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x))
- #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue)
- #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x)
- #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x)
- #else
- #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS))
- #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0])
- #endif
- typedef Py_ssize_t __Pyx_compact_pylong;
- typedef size_t __Pyx_compact_upylong;
- #else // Py < 3.12
- #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0)
- #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0)
- #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0)
- #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0)
- #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0])
- #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x))
- #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x)
- #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1)
- #define __Pyx_PyLong_CompactValue(x)\
- ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? -(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
- typedef sdigit __Pyx_compact_pylong;
- typedef digit __Pyx_compact_upylong;
- #endif
- #if PY_VERSION_HEX >= 0x030C00A5
- #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit)
- #else
- #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit)
- #endif
-#endif
-#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
-static int __Pyx_sys_getdefaultencoding_not_ascii;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
- PyObject* sys;
- PyObject* default_encoding = NULL;
- PyObject* ascii_chars_u = NULL;
- PyObject* ascii_chars_b = NULL;
- const char* default_encoding_c;
- sys = PyImport_ImportModule("sys");
- if (!sys) goto bad;
- default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
- Py_DECREF(sys);
- if (!default_encoding) goto bad;
- default_encoding_c = PyBytes_AsString(default_encoding);
- if (!default_encoding_c) goto bad;
- if (strcmp(default_encoding_c, "ascii") == 0) {
- __Pyx_sys_getdefaultencoding_not_ascii = 0;
- } else {
- char ascii_chars[128];
- int c;
- for (c = 0; c < 128; c++) {
- ascii_chars[c] = (char) c;
- }
- __Pyx_sys_getdefaultencoding_not_ascii = 1;
- ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
- if (!ascii_chars_u) goto bad;
- ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
- if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
- PyErr_Format(
- PyExc_ValueError,
- "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
- default_encoding_c);
- goto bad;
- }
- Py_DECREF(ascii_chars_u);
- Py_DECREF(ascii_chars_b);
- }
- Py_DECREF(default_encoding);
- return 0;
-bad:
- Py_XDECREF(default_encoding);
- Py_XDECREF(ascii_chars_u);
- Py_XDECREF(ascii_chars_b);
- return -1;
-}
-#endif
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
-#else
-#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
-#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
-static char* __PYX_DEFAULT_STRING_ENCODING;
-static int __Pyx_init_sys_getdefaultencoding_params(void) {
- PyObject* sys;
- PyObject* default_encoding = NULL;
- char* default_encoding_c;
- sys = PyImport_ImportModule("sys");
- if (!sys) goto bad;
- default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
- Py_DECREF(sys);
- if (!default_encoding) goto bad;
- default_encoding_c = PyBytes_AsString(default_encoding);
- if (!default_encoding_c) goto bad;
- __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
- if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
- strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
- Py_DECREF(default_encoding);
- return 0;
-bad:
- Py_XDECREF(default_encoding);
- return -1;
-}
-#endif
-#endif
-
-
-/* Test for GCC > 2.95 */
-#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
- #define likely(x) __builtin_expect(!!(x), 1)
- #define unlikely(x) __builtin_expect(!!(x), 0)
-#else /* !__GNUC__ or GCC < 2.95 */
- #define likely(x) (x)
- #define unlikely(x) (x)
-#endif /* __GNUC__ */
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
-
-#if !CYTHON_USE_MODULE_STATE
-static PyObject *__pyx_m = NULL;
-#endif
-static int __pyx_lineno;
-static int __pyx_clineno = 0;
-static const char * __pyx_cfilenm = __FILE__;
-static const char *__pyx_filename;
-
-/* #### Code section: filename_table ### */
-
-static const char *__pyx_f[] = {
- "core.pyx",
- "",
-};
-/* #### Code section: utility_code_proto_before_types ### */
-/* ForceInitThreads.proto */
-#ifndef __PYX_FORCE_INIT_THREADS
- #define __PYX_FORCE_INIT_THREADS 0
-#endif
-
-/* NoFastGil.proto */
-#define __Pyx_PyGILState_Ensure PyGILState_Ensure
-#define __Pyx_PyGILState_Release PyGILState_Release
-#define __Pyx_FastGIL_Remember()
-#define __Pyx_FastGIL_Forget()
-#define __Pyx_FastGilFuncInit()
-
-/* BufferFormatStructs.proto */
-struct __Pyx_StructField_;
-#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
-typedef struct {
- const char* name;
- struct __Pyx_StructField_* fields;
- size_t size;
- size_t arraysize[8];
- int ndim;
- char typegroup;
- char is_unsigned;
- int flags;
-} __Pyx_TypeInfo;
-typedef struct __Pyx_StructField_ {
- __Pyx_TypeInfo* type;
- const char* name;
- size_t offset;
-} __Pyx_StructField;
-typedef struct {
- __Pyx_StructField* field;
- size_t parent_offset;
-} __Pyx_BufFmt_StackElem;
-typedef struct {
- __Pyx_StructField root;
- __Pyx_BufFmt_StackElem* head;
- size_t fmt_offset;
- size_t new_count, enc_count;
- size_t struct_alignment;
- int is_complex;
- char enc_type;
- char new_packmode;
- char enc_packmode;
- char is_valid_array;
-} __Pyx_BufFmt_Context;
-
-/* Atomics.proto */
-#include
-#ifndef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 1
-#endif
-#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
-#define __pyx_atomic_int_type int
-#define __pyx_nonatomic_int_type int
-#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
- (__STDC_VERSION__ >= 201112L) &&\
- !defined(__STDC_NO_ATOMICS__))
- #include
-#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
- (__cplusplus >= 201103L) ||\
- (defined(_MSC_VER) && _MSC_VER >= 1700)))
- #include
-#endif
-#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
- (__STDC_VERSION__ >= 201112L) &&\
- !defined(__STDC_NO_ATOMICS__) &&\
- ATOMIC_INT_LOCK_FREE == 2)
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type atomic_int
- #define __pyx_atomic_incr_aligned(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
- #define __pyx_atomic_decr_aligned(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
- #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
- #pragma message ("Using standard C atomics")
- #elif defined(__PYX_DEBUG_ATOMICS)
- #warning "Using standard C atomics"
- #endif
-#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
- (__cplusplus >= 201103L) ||\
-\
- (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
- ATOMIC_INT_LOCK_FREE == 2)
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type std::atomic_int
- #define __pyx_atomic_incr_aligned(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
- #define __pyx_atomic_decr_aligned(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
- #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
- #pragma message ("Using standard C++ atomics")
- #elif defined(__PYX_DEBUG_ATOMICS)
- #warning "Using standard C++ atomics"
- #endif
-#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
- (__GNUC_MINOR__ > 1 ||\
- (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
- #define __pyx_atomic_incr_aligned(value) __sync_fetch_and_add(value, 1)
- #define __pyx_atomic_decr_aligned(value) __sync_fetch_and_sub(value, 1)
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Using GNU atomics"
- #endif
-#elif CYTHON_ATOMICS && defined(_MSC_VER)
- #include
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type long
- #define __pyx_nonatomic_int_type long
- #pragma intrinsic (_InterlockedExchangeAdd)
- #define __pyx_atomic_incr_aligned(value) _InterlockedExchangeAdd(value, 1)
- #define __pyx_atomic_decr_aligned(value) _InterlockedExchangeAdd(value, -1)
- #ifdef __PYX_DEBUG_ATOMICS
- #pragma message ("Using MSVC atomics")
- #endif
-#else
- #undef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 0
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Not using atomics"
- #endif
-#endif
-#if CYTHON_ATOMICS
- #define __pyx_add_acquisition_count(memview)\
- __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview))
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview))
-#else
- #define __pyx_add_acquisition_count(memview)\
- __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
-#endif
-
-/* MemviewSliceStruct.proto */
-struct __pyx_memoryview_obj;
-typedef struct {
- struct __pyx_memoryview_obj *memview;
- char *data;
- Py_ssize_t shape[8];
- Py_ssize_t strides[8];
- Py_ssize_t suboffsets[8];
-} __Pyx_memviewslice;
-#define __Pyx_MemoryView_Len(m) (m.shape[0])
-
-/* #### Code section: numeric_typedefs ### */
-/* #### Code section: complex_type_declarations ### */
-/* #### Code section: type_declarations ### */
-
-/*--- Type declarations ---*/
-struct __pyx_array_obj;
-struct __pyx_MemviewEnum_obj;
-struct __pyx_memoryview_obj;
-struct __pyx_memoryviewslice_obj;
-struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each;
-
-/* "monotonic_align/core.pyx":7
- * @cython.boundscheck(False)
- * @cython.wraparound(False)
- * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
- * cdef int x
- * cdef int y
- */
-struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each {
- int __pyx_n;
- float max_neg_val;
-};
-
-/* "View.MemoryView":114
- * @cython.collection_type("sequence")
- * @cname("__pyx_array")
- * cdef class array: # <<<<<<<<<<<<<<
- *
- * cdef:
- */
-struct __pyx_array_obj {
- PyObject_HEAD
- struct __pyx_vtabstruct_array *__pyx_vtab;
- char *data;
- Py_ssize_t len;
- char *format;
- int ndim;
- Py_ssize_t *_shape;
- Py_ssize_t *_strides;
- Py_ssize_t itemsize;
- PyObject *mode;
- PyObject *_format;
- void (*callback_free_data)(void *);
- int free_data;
- int dtype_is_object;
-};
-
-
-/* "View.MemoryView":302
- *
- * @cname('__pyx_MemviewEnum')
- * cdef class Enum(object): # <<<<<<<<<<<<<<
- * cdef object name
- * def __init__(self, name):
- */
-struct __pyx_MemviewEnum_obj {
- PyObject_HEAD
- PyObject *name;
-};
-
-
-/* "View.MemoryView":337
- *
- * @cname('__pyx_memoryview')
- * cdef class memoryview: # <<<<<<<<<<<<<<
- *
- * cdef object obj
- */
-struct __pyx_memoryview_obj {
- PyObject_HEAD
- struct __pyx_vtabstruct_memoryview *__pyx_vtab;
- PyObject *obj;
- PyObject *_size;
- PyObject *_array_interface;
- PyThread_type_lock lock;
- __pyx_atomic_int_type acquisition_count;
- Py_buffer view;
- int flags;
- int dtype_is_object;
- __Pyx_TypeInfo *typeinfo;
-};
-
-
-/* "View.MemoryView":952
- * @cython.collection_type("sequence")
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
- * "Internal class for passing memoryview slices to Python"
- *
- */
-struct __pyx_memoryviewslice_obj {
- struct __pyx_memoryview_obj __pyx_base;
- __Pyx_memviewslice from_slice;
- PyObject *from_object;
- PyObject *(*to_object_func)(char *);
- int (*to_dtype_func)(char *, PyObject *);
-};
-
-
-
-/* "View.MemoryView":114
- * @cython.collection_type("sequence")
- * @cname("__pyx_array")
- * cdef class array: # <<<<<<<<<<<<<<
- *
- * cdef:
- */
-
-struct __pyx_vtabstruct_array {
- PyObject *(*get_memview)(struct __pyx_array_obj *);
-};
-static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
-
-
-/* "View.MemoryView":337
- *
- * @cname('__pyx_memoryview')
- * cdef class memoryview: # <<<<<<<<<<<<<<
- *
- * cdef object obj
- */
-
-struct __pyx_vtabstruct_memoryview {
- char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
- PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
- PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
- PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
- PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
- PyObject *(*_get_base)(struct __pyx_memoryview_obj *);
-};
-static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
-
-
-/* "View.MemoryView":952
- * @cython.collection_type("sequence")
- * @cname('__pyx_memoryviewslice')
- * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
- * "Internal class for passing memoryview slices to Python"
- *
- */
-
-struct __pyx_vtabstruct__memoryviewslice {
- struct __pyx_vtabstruct_memoryview __pyx_base;
-};
-static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
-/* #### Code section: utility_code_proto ### */
-
-/* --- Runtime support code (head) --- */
-/* Refnanny.proto */
-#ifndef CYTHON_REFNANNY
- #define CYTHON_REFNANNY 0
-#endif
-#if CYTHON_REFNANNY
- typedef struct {
- void (*INCREF)(void*, PyObject*, Py_ssize_t);
- void (*DECREF)(void*, PyObject*, Py_ssize_t);
- void (*GOTREF)(void*, PyObject*, Py_ssize_t);
- void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
- void* (*SetupContext)(const char*, Py_ssize_t, const char*);
- void (*FinishContext)(void**);
- } __Pyx_RefNannyAPIStruct;
- static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
- static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
- #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
-#ifdef WITH_THREAD
- #define __Pyx_RefNannySetupContext(name, acquire_gil)\
- if (acquire_gil) {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
- PyGILState_Release(__pyx_gilstate_save);\
- } else {\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
- }
- #define __Pyx_RefNannyFinishContextNogil() {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __Pyx_RefNannyFinishContext();\
- PyGILState_Release(__pyx_gilstate_save);\
- }
-#else
- #define __Pyx_RefNannySetupContext(name, acquire_gil)\
- __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__))
- #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext()
-#endif
- #define __Pyx_RefNannyFinishContextNogil() {\
- PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
- __Pyx_RefNannyFinishContext();\
- PyGILState_Release(__pyx_gilstate_save);\
- }
- #define __Pyx_RefNannyFinishContext()\
- __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
- #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
- #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
- #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
- #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
- #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0)
-#else
- #define __Pyx_RefNannyDeclarations
- #define __Pyx_RefNannySetupContext(name, acquire_gil)
- #define __Pyx_RefNannyFinishContextNogil()
- #define __Pyx_RefNannyFinishContext()
- #define __Pyx_INCREF(r) Py_INCREF(r)
- #define __Pyx_DECREF(r) Py_DECREF(r)
- #define __Pyx_GOTREF(r)
- #define __Pyx_GIVEREF(r)
- #define __Pyx_XINCREF(r) Py_XINCREF(r)
- #define __Pyx_XDECREF(r) Py_XDECREF(r)
- #define __Pyx_XGOTREF(r)
- #define __Pyx_XGIVEREF(r)
-#endif
-#define __Pyx_Py_XDECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; Py_XDECREF(tmp);\
- } while (0)
-#define __Pyx_XDECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; __Pyx_XDECREF(tmp);\
- } while (0)
-#define __Pyx_DECREF_SET(r, v) do {\
- PyObject *tmp = (PyObject *) r;\
- r = v; __Pyx_DECREF(tmp);\
- } while (0)
-#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
-#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
-
-/* PyErrExceptionMatches.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
-static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
-#else
-#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
-#endif
-
-/* PyThreadStateGet.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
-#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
-#if PY_VERSION_HEX >= 0x030C00A6
-#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL)
-#else
-#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type)
-#endif
-#else
-#define __Pyx_PyThreadState_declare
-#define __Pyx_PyThreadState_assign
-#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL)
-#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred()
-#endif
-
-/* PyErrFetchRestore.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
-#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6
-#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
-#else
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#endif
-#else
-#define __Pyx_PyErr_Clear() PyErr_Clear()
-#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
-#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
-#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
-#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
-#endif
-
-/* PyObjectGetAttrStr.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
-#endif
-
-/* PyObjectGetAttrStrNoError.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
-
-/* GetBuiltinName.proto */
-static PyObject *__Pyx_GetBuiltinName(PyObject *name);
-
-/* TupleAndListFromArray.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n);
-static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n);
-#endif
-
-/* IncludeStringH.proto */
-#include
-
-/* BytesEquals.proto */
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* UnicodeEquals.proto */
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* fastcall.proto */
-#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i)
-#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds)
-#define __Pyx_KwValues_VARARGS(args, nargs) NULL
-#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s)
-#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw)
-#if CYTHON_METH_FASTCALL
- #define __Pyx_Arg_FASTCALL(args, i) args[i]
- #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds)
- #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs))
- static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s);
- #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw)
-#else
- #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS
- #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS
- #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS
- #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS
- #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS
-#endif
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start)
-#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start)
-#else
-#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop)
-#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop)
-#endif
-
-/* RaiseArgTupleInvalid.proto */
-static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
- Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
-
-/* RaiseDoubleKeywords.proto */
-static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
-
-/* ParseKeywords.proto */
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues,
- PyObject **argnames[],
- PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,
- const char* function_name);
-
-/* ArgTypeTest.proto */
-#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
- ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 1 :\
- __Pyx__ArgTypeTest(obj, type, name, exact))
-static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
-
-/* RaiseException.proto */
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
-
-/* PyFunctionFastCall.proto */
-#if CYTHON_FAST_PYCALL
-#if !CYTHON_VECTORCALL
-#define __Pyx_PyFunction_FastCall(func, args, nargs)\
- __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
-static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
-#endif
-#define __Pyx_BUILD_ASSERT_EXPR(cond)\
- (sizeof(char [1 - 2*!(cond)]) - 1)
-#ifndef Py_MEMBER_SIZE
-#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
-#endif
-#if !CYTHON_VECTORCALL
-#if PY_VERSION_HEX >= 0x03080000
- #include "frameobject.h"
-#if PY_VERSION_HEX >= 0x030b00a6
- #ifndef Py_BUILD_CORE
- #define Py_BUILD_CORE 1
- #endif
- #include "internal/pycore_frame.h"
-#endif
- #define __Pxy_PyFrame_Initialize_Offsets()
- #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus)
-#else
- static size_t __pyx_pyframe_localsplus_offset = 0;
- #include "frameobject.h"
- #define __Pxy_PyFrame_Initialize_Offsets()\
- ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
- (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
- #define __Pyx_PyFrame_GetLocalsplus(frame)\
- (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
-#endif
-#endif
-#endif
-
-/* PyObjectCall.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
-#else
-#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
-#endif
-
-/* PyObjectCallMethO.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
-#endif
-
-/* PyObjectFastCall.proto */
-#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL)
-static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs);
-
-/* RaiseUnexpectedTypeError.proto */
-static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj);
-
-/* GCCDiagnostics.proto */
-#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
-#define __Pyx_HAS_GCC_DIAGNOSTIC
-#endif
-
-/* BuildPyUnicode.proto */
-static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength,
- int prepend_sign, char padding_char);
-
-/* CIntToPyUnicode.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char);
-
-/* CIntToPyUnicode.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char);
-
-/* JoinPyUnicode.proto */
-static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength,
- Py_UCS4 max_char);
-
-/* StrEquals.proto */
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
-#else
-#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
-#endif
-
-/* PyObjectFormatSimple.proto */
-#if CYTHON_COMPILING_IN_PYPY
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- PyObject_Format(s, f))
-#elif PY_MAJOR_VERSION < 3
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") :\
- PyObject_Format(s, f))
-#elif CYTHON_USE_TYPE_SLOTS
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\
- likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\
- PyObject_Format(s, f))
-#else
- #define __Pyx_PyObject_FormatSimple(s, f) (\
- likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\
- PyObject_Format(s, f))
-#endif
-
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
-/* GetAttr.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
-
-/* GetItemInt.proto */
-#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
- (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
- __Pyx_GetItemInt_Generic(o, to_py_func(i))))
-#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
- (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
- int wraparound, int boundscheck);
-#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
- (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
- int wraparound, int boundscheck);
-static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
- int is_list, int wraparound, int boundscheck);
-
-/* PyObjectCallOneArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
-
-/* ObjectGetItem.proto */
-#if CYTHON_USE_TYPE_SLOTS
-static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key);
-#else
-#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
-#endif
-
-/* KeywordStringCheck.proto */
-static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed);
-
-/* DivInt[Py_ssize_t].proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
-
-/* UnaryNegOverflows.proto */
-#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x)\
- (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
-
-/* GetAttr3.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
-
-/* PyDictVersioning.proto */
-#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
-#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
-#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
- (version_var) = __PYX_GET_DICT_VERSION(dict);\
- (cache_var) = (value);
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
- static PY_UINT64_T __pyx_dict_version = 0;\
- static PyObject *__pyx_dict_cached_value = NULL;\
- if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
- (VAR) = __pyx_dict_cached_value;\
- } else {\
- (VAR) = __pyx_dict_cached_value = (LOOKUP);\
- __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
- }\
-}
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
-static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
-static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
-#else
-#define __PYX_GET_DICT_VERSION(dict) (0)
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
-#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
-#endif
-
-/* GetModuleGlobalName.proto */
-#if CYTHON_USE_DICT_VERSIONS
-#define __Pyx_GetModuleGlobalName(var, name) do {\
- static PY_UINT64_T __pyx_dict_version = 0;\
- static PyObject *__pyx_dict_cached_value = NULL;\
- (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
- (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
- __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-} while(0)
-#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\
- PY_UINT64_T __pyx_dict_version;\
- PyObject *__pyx_dict_cached_value;\
- (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
-} while(0)
-static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
-#else
-#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
-#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
-static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
-#endif
-
-/* AssertionsEnabled.proto */
-#define __Pyx_init_assertions_enabled()
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
- #define __pyx_assertions_enabled() (1)
-#elif PY_VERSION_HEX < 0x03080000 || CYTHON_COMPILING_IN_PYPY || defined(Py_LIMITED_API)
- #define __pyx_assertions_enabled() (!Py_OptimizeFlag)
-#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030900A6
- static int __pyx_assertions_enabled_flag;
- #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag)
- #undef __Pyx_init_assertions_enabled
- static void __Pyx_init_assertions_enabled(void) {
- __pyx_assertions_enabled_flag = ! _PyInterpreterState_GetConfig(__Pyx_PyThreadState_Current->interp)->optimization_level;
- }
-#else
- #define __pyx_assertions_enabled() (!Py_OptimizeFlag)
-#endif
-
-/* RaiseTooManyValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
-
-/* RaiseNeedMoreValuesToUnpack.proto */
-static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
-
-/* RaiseNoneIterError.proto */
-static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
-
-/* ExtTypeTest.proto */
-static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
-
-/* GetTopmostException.proto */
-#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE
-static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
-#endif
-
-/* SaveResetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
-#else
-#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
-#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
-#endif
-
-/* GetException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
-static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* SwapException.proto */
-#if CYTHON_FAST_THREAD_STATE
-#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
-static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
-#else
-static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
-#endif
-
-/* Import.proto */
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
-
-/* ImportDottedModule.proto */
-static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple);
-#if PY_MAJOR_VERSION >= 3
-static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple);
-#endif
-
-/* ssize_strlen.proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
-
-/* FastTypeChecks.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
-#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2)
-static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
-static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
-static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
-#else
-#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
-#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2))
-#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
-#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
-#endif
-#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2)
-#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
-
-CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-/* ListCompAppend.proto */
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
-static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
- PyListObject* L = (PyListObject*) list;
- Py_ssize_t len = Py_SIZE(list);
- if (likely(L->allocated > len)) {
- Py_INCREF(x);
- PyList_SET_ITEM(list, len, x);
- __Pyx_SET_SIZE(list, len + 1);
- return 0;
- }
- return PyList_Append(list, x);
-}
-#else
-#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
-#endif
-
-/* PySequenceMultiply.proto */
-#define __Pyx_PySequence_Multiply_Left(mul, seq) __Pyx_PySequence_Multiply(seq, mul)
-static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul);
-
-/* SetItemInt.proto */
-#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
- (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
- __Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
-static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
- int is_list, int wraparound, int boundscheck);
-
-/* RaiseUnboundLocalError.proto */
-static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
-
-/* DivInt[long].proto */
-static CYTHON_INLINE long __Pyx_div_long(long, long);
-
-/* PySequenceContains.proto */
-static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
- int result = PySequence_Contains(seq, item);
- return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
-}
-
-/* ImportFrom.proto */
-static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
-
-/* HasAttr.proto */
-static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
-
-/* ErrOccurredWithGIL.proto */
-static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void);
-
-/* PyObject_GenericGetAttrNoDict.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
-#endif
-
-/* PyObject_GenericGetAttr.proto */
-#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
-static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
-#else
-#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
-#endif
-
-/* IncludeStructmemberH.proto */
-#include
-
-/* FixUpExtensionType.proto */
-#if CYTHON_USE_TYPE_SPECS
-static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type);
-#endif
-
-/* PyObjectCallNoArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
-
-/* PyObjectGetMethod.proto */
-static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
-
-/* PyObjectCallMethod0.proto */
-static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
-
-/* ValidateBasesTuple.proto */
-#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
-static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases);
-#endif
-
-/* PyType_Ready.proto */
-CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);
-
-/* SetVTable.proto */
-static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable);
-
-/* GetVTable.proto */
-static void* __Pyx_GetVtable(PyTypeObject *type);
-
-/* MergeVTables.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-static int __Pyx_MergeVtables(PyTypeObject *type);
-#endif
-
-/* SetupReduce.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-static int __Pyx_setup_reduce(PyObject* type_obj);
-#endif
-
-/* FetchSharedCythonModule.proto */
-static PyObject *__Pyx_FetchSharedCythonABIModule(void);
-
-/* FetchCommonType.proto */
-#if !CYTHON_USE_TYPE_SPECS
-static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
-#else
-static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases);
-#endif
-
-/* PyMethodNew.proto */
-#if PY_MAJOR_VERSION >= 3
-static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
- CYTHON_UNUSED_VAR(typ);
- if (!self)
- return __Pyx_NewRef(func);
- return PyMethod_New(func, self);
-}
-#else
- #define __Pyx_PyMethod_New PyMethod_New
-#endif
-
-/* PyVectorcallFastCallDict.proto */
-#if CYTHON_METH_FASTCALL
-static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw);
-#endif
-
-/* CythonFunctionShared.proto */
-#define __Pyx_CyFunction_USED
-#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
-#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
-#define __Pyx_CYFUNCTION_CCLASS 0x04
-#define __Pyx_CYFUNCTION_COROUTINE 0x08
-#define __Pyx_CyFunction_GetClosure(f)\
- (((__pyx_CyFunctionObject *) (f))->func_closure)
-#if PY_VERSION_HEX < 0x030900B1
- #define __Pyx_CyFunction_GetClassObj(f)\
- (((__pyx_CyFunctionObject *) (f))->func_classobj)
-#else
- #define __Pyx_CyFunction_GetClassObj(f)\
- ((PyObject*) ((PyCMethodObject *) (f))->mm_class)
-#endif
-#define __Pyx_CyFunction_SetClassObj(f, classobj)\
- __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj))
-#define __Pyx_CyFunction_Defaults(type, f)\
- ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
-#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
- ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
-typedef struct {
-#if PY_VERSION_HEX < 0x030900B1
- PyCFunctionObject func;
-#else
- PyCMethodObject func;
-#endif
-#if CYTHON_BACKPORT_VECTORCALL
- __pyx_vectorcallfunc func_vectorcall;
-#endif
-#if PY_VERSION_HEX < 0x030500A0
- PyObject *func_weakreflist;
-#endif
- PyObject *func_dict;
- PyObject *func_name;
- PyObject *func_qualname;
- PyObject *func_doc;
- PyObject *func_globals;
- PyObject *func_code;
- PyObject *func_closure;
-#if PY_VERSION_HEX < 0x030900B1
- PyObject *func_classobj;
-#endif
- void *defaults;
- int defaults_pyobjects;
- size_t defaults_size; // used by FusedFunction for copying defaults
- int flags;
- PyObject *defaults_tuple;
- PyObject *defaults_kwdict;
- PyObject *(*defaults_getter)(PyObject *);
- PyObject *func_annotations;
- PyObject *func_is_coroutine;
-} __pyx_CyFunctionObject;
-#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType)
-#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type)
-#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType)
-static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
- int flags, PyObject* qualname,
- PyObject *closure,
- PyObject *module, PyObject *globals,
- PyObject* code);
-static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj);
-static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
- size_t size,
- int pyobjects);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
- PyObject *tuple);
-static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
- PyObject *dict);
-static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
- PyObject *dict);
-static int __pyx_CyFunction_init(PyObject *module);
-#if CYTHON_METH_FASTCALL
-static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames);
-#if CYTHON_BACKPORT_VECTORCALL
-#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall)
-#else
-#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall)
-#endif
-#endif
-
-/* CythonFunction.proto */
-static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
- int flags, PyObject* qualname,
- PyObject *closure,
- PyObject *module, PyObject *globals,
- PyObject* code);
-
-/* CLineInTraceback.proto */
-#ifdef CYTHON_CLINE_IN_TRACEBACK
-#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
-#else
-static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
-#endif
-
-/* CodeObjectCache.proto */
-#if !CYTHON_COMPILING_IN_LIMITED_API
-typedef struct {
- PyCodeObject* code_object;
- int code_line;
-} __Pyx_CodeObjectCacheEntry;
-struct __Pyx_CodeObjectCache {
- int count;
- int max_count;
- __Pyx_CodeObjectCacheEntry* entries;
-};
-static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
-static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
-static PyCodeObject *__pyx_find_code_object(int code_line);
-static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
-#endif
-
-/* AddTraceback.proto */
-static void __Pyx_AddTraceback(const char *funcname, int c_line,
- int py_line, const char *filename);
-
-#if PY_MAJOR_VERSION < 3
- static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
- static void __Pyx_ReleaseBuffer(Py_buffer *view);
-#else
- #define __Pyx_GetBuffer PyObject_GetBuffer
- #define __Pyx_ReleaseBuffer PyBuffer_Release
-#endif
-
-
-/* BufferStructDeclare.proto */
-typedef struct {
- Py_ssize_t shape, strides, suboffsets;
-} __Pyx_Buf_DimInfo;
-typedef struct {
- size_t refcount;
- Py_buffer pybuffer;
-} __Pyx_Buffer;
-typedef struct {
- __Pyx_Buffer *rcbuffer;
- char *data;
- __Pyx_Buf_DimInfo diminfo[8];
-} __Pyx_LocalBuf_ND;
-
-/* MemviewSliceIsContig.proto */
-static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
-
-/* OverlappingSlices.proto */
-static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
- __Pyx_memviewslice *slice2,
- int ndim, size_t itemsize);
-
-/* IsLittleEndian.proto */
-static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
-
-/* BufferFormatCheck.proto */
-static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
-static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
- __Pyx_BufFmt_StackElem* stack,
- __Pyx_TypeInfo* type);
-
-/* TypeInfoCompare.proto */
-static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
-
-/* MemviewSliceValidateAndInit.proto */
-static int __Pyx_ValidateAndInit_memviewslice(
- int *axes_specs,
- int c_or_f_flag,
- int buf_flags,
- int ndim,
- __Pyx_TypeInfo *dtype,
- __Pyx_BufFmt_StackElem stack[],
- __Pyx_memviewslice *memviewslice,
- PyObject *original_obj);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag);
-
-/* ObjectToMemviewSlice.proto */
-static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
-
-/* MemviewSliceCopyTemplate.proto */
-static __Pyx_memviewslice
-__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
- const char *mode, int ndim,
- size_t sizeof_dtype, int contig_flag,
- int dtype_is_object);
-
-/* MemviewSliceInit.proto */
-#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
-#define __Pyx_MEMVIEW_DIRECT 1
-#define __Pyx_MEMVIEW_PTR 2
-#define __Pyx_MEMVIEW_FULL 4
-#define __Pyx_MEMVIEW_CONTIG 8
-#define __Pyx_MEMVIEW_STRIDED 16
-#define __Pyx_MEMVIEW_FOLLOW 32
-#define __Pyx_IS_C_CONTIG 1
-#define __Pyx_IS_F_CONTIG 2
-static int __Pyx_init_memviewslice(
- struct __pyx_memoryview_obj *memview,
- int ndim,
- __Pyx_memviewslice *memviewslice,
- int memview_is_new_reference);
-static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
- __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
-static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
- __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock);
-#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count)
-#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
-#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__)
-static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
-static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *, int, int);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
-
-/* CIntToPy.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
-
-/* CIntFromPy.proto */
-static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
-
-/* FormatTypeName.proto */
-#if CYTHON_COMPILING_IN_LIMITED_API
-typedef PyObject *__Pyx_TypeName;
-#define __Pyx_FMT_TYPENAME "%U"
-static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp);
-#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj)
-#else
-typedef const char *__Pyx_TypeName;
-#define __Pyx_FMT_TYPENAME "%.200s"
-#define __Pyx_PyType_GetName(tp) ((tp)->tp_name)
-#define __Pyx_DECREF_TypeName(obj)
-#endif
-
-/* CheckBinaryVersion.proto */
-static int __Pyx_check_binary_version(void);
-
-/* InitStrings.proto */
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
-
-/* #### Code section: module_declarations ### */
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
-static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
-static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self); /* proto*/
-static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
-static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
-static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto*/
-
-/* Module declarations from "cython.view" */
-
-/* Module declarations from "cython.dataclasses" */
-
-/* Module declarations from "cython" */
-
-/* Module declarations from "monotonic_align.core" */
-static PyObject *__pyx_collections_abc_Sequence = 0;
-static PyObject *generic = 0;
-static PyObject *strided = 0;
-static PyObject *indirect = 0;
-static PyObject *contiguous = 0;
-static PyObject *indirect_contiguous = 0;
-static int __pyx_memoryview_thread_locks_used;
-static PyThread_type_lock __pyx_memoryview_thread_locks[8];
-static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/
-static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
-static int __pyx_array_allocate_buffer(struct __pyx_array_obj *); /*proto*/
-static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
-static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
-static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
-static PyObject *_unellipsify(PyObject *, int); /*proto*/
-static int assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
-static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
-static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
-static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
-static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
-static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
-static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
-static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
-static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
-static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
-static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
-static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
-static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
-static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
-static int __pyx_memoryview_err_dim(PyObject *, PyObject *, int); /*proto*/
-static int __pyx_memoryview_err(PyObject *, PyObject *); /*proto*/
-static int __pyx_memoryview_err_no_memory(void); /*proto*/
-static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
-static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
-static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
-static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
-static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
-/* #### Code section: typeinfo ### */
-static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, __PYX_IS_UNSIGNED(int) ? 'U' : 'I', __PYX_IS_UNSIGNED(int), 0 };
-static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
-/* #### Code section: before_global_var ### */
-#define __Pyx_MODULE_NAME "monotonic_align.core"
-extern int __pyx_module_is_main_monotonic_align__core;
-int __pyx_module_is_main_monotonic_align__core = 0;
-
-/* Implementation of "monotonic_align.core" */
-/* #### Code section: global_var ### */
-static PyObject *__pyx_builtin_range;
-static PyObject *__pyx_builtin___import__;
-static PyObject *__pyx_builtin_ValueError;
-static PyObject *__pyx_builtin_MemoryError;
-static PyObject *__pyx_builtin_enumerate;
-static PyObject *__pyx_builtin_TypeError;
-static PyObject *__pyx_builtin_AssertionError;
-static PyObject *__pyx_builtin_Ellipsis;
-static PyObject *__pyx_builtin_id;
-static PyObject *__pyx_builtin_IndexError;
-/* #### Code section: string_decls ### */
-static const char __pyx_k_[] = ": ";
-static const char __pyx_k_O[] = "O";
-static const char __pyx_k_c[] = "c";
-static const char __pyx_k__2[] = ".";
-static const char __pyx_k__3[] = "*";
-static const char __pyx_k__6[] = "'";
-static const char __pyx_k__7[] = ")";
-static const char __pyx_k_gc[] = "gc";
-static const char __pyx_k_id[] = "id";
-static const char __pyx_k__23[] = "?";
-static const char __pyx_k_abc[] = "abc";
-static const char __pyx_k_and[] = " and ";
-static const char __pyx_k_got[] = " (got ";
-static const char __pyx_k_new[] = "__new__";
-static const char __pyx_k_obj[] = "obj";
-static const char __pyx_k_sys[] = "sys";
-static const char __pyx_k_base[] = "base";
-static const char __pyx_k_dict[] = "__dict__";
-static const char __pyx_k_main[] = "__main__";
-static const char __pyx_k_mode[] = "mode";
-static const char __pyx_k_name[] = "name";
-static const char __pyx_k_ndim[] = "ndim";
-static const char __pyx_k_pack[] = "pack";
-static const char __pyx_k_size[] = "size";
-static const char __pyx_k_spec[] = "__spec__";
-static const char __pyx_k_step[] = "step";
-static const char __pyx_k_stop[] = "stop";
-static const char __pyx_k_t_xs[] = "t_xs";
-static const char __pyx_k_t_ys[] = "t_ys";
-static const char __pyx_k_test[] = "__test__";
-static const char __pyx_k_ASCII[] = "ASCII";
-static const char __pyx_k_class[] = "__class__";
-static const char __pyx_k_count[] = "count";
-static const char __pyx_k_error[] = "error";
-static const char __pyx_k_flags[] = "flags";
-static const char __pyx_k_index[] = "index";
-static const char __pyx_k_paths[] = "paths";
-static const char __pyx_k_range[] = "range";
-static const char __pyx_k_shape[] = "shape";
-static const char __pyx_k_start[] = "start";
-static const char __pyx_k_enable[] = "enable";
-static const char __pyx_k_encode[] = "encode";
-static const char __pyx_k_format[] = "format";
-static const char __pyx_k_import[] = "__import__";
-static const char __pyx_k_name_2[] = "__name__";
-static const char __pyx_k_pickle[] = "pickle";
-static const char __pyx_k_reduce[] = "__reduce__";
-static const char __pyx_k_struct[] = "struct";
-static const char __pyx_k_unpack[] = "unpack";
-static const char __pyx_k_update[] = "update";
-static const char __pyx_k_values[] = "values";
-static const char __pyx_k_disable[] = "disable";
-static const char __pyx_k_fortran[] = "fortran";
-static const char __pyx_k_memview[] = "memview";
-static const char __pyx_k_Ellipsis[] = "Ellipsis";
-static const char __pyx_k_Sequence[] = "Sequence";
-static const char __pyx_k_core_pyx[] = "core.pyx";
-static const char __pyx_k_getstate[] = "__getstate__";
-static const char __pyx_k_itemsize[] = "itemsize";
-static const char __pyx_k_pyx_type[] = "__pyx_type";
-static const char __pyx_k_register[] = "register";
-static const char __pyx_k_setstate[] = "__setstate__";
-static const char __pyx_k_TypeError[] = "TypeError";
-static const char __pyx_k_enumerate[] = "enumerate";
-static const char __pyx_k_isenabled[] = "isenabled";
-static const char __pyx_k_pyx_state[] = "__pyx_state";
-static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
-static const char __pyx_k_IndexError[] = "IndexError";
-static const char __pyx_k_ValueError[] = "ValueError";
-static const char __pyx_k_pyx_result[] = "__pyx_result";
-static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
-static const char __pyx_k_MemoryError[] = "MemoryError";
-static const char __pyx_k_PickleError[] = "PickleError";
-static const char __pyx_k_collections[] = "collections";
-static const char __pyx_k_initializing[] = "_initializing";
-static const char __pyx_k_is_coroutine[] = "_is_coroutine";
-static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
-static const char __pyx_k_stringsource[] = "";
-static const char __pyx_k_version_info[] = "version_info";
-static const char __pyx_k_class_getitem[] = "__class_getitem__";
-static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
-static const char __pyx_k_AssertionError[] = "AssertionError";
-static const char __pyx_k_maximum_path_c[] = "maximum_path_c";
-static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
-static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
-static const char __pyx_k_collections_abc[] = "collections.abc";
-static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
-static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
-static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
-static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
-static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines";
-static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
-static const char __pyx_k_strided_and_direct[] = "";
-static const char __pyx_k_monotonic_align_core[] = "monotonic_align.core";
-static const char __pyx_k_strided_and_indirect[] = "";
-static const char __pyx_k_Invalid_shape_in_axis[] = "Invalid shape in axis ";
-static const char __pyx_k_contiguous_and_direct[] = "";
-static const char __pyx_k_Cannot_index_with_type[] = "Cannot index with type '";
-static const char __pyx_k_MemoryView_of_r_object[] = "";
-static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "";
-static const char __pyx_k_contiguous_and_indirect[] = "";
-static const char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
-static const char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
-static const char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
-static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
-static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
-static const char __pyx_k_strided_and_direct_or_indirect[] = "";
-static const char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
-static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
-static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
-static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
-static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
-static const char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
-static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
-static const char __pyx_k_Incompatible_checksums_0x_x_vs_0[] = "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))";
-static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
-static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got ";
-static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis ";
-static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
-static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension ";
-static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
-static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
-/* #### Code section: decls ### */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
-static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
-static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
-static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */
-static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
-/* #### Code section: late_includes ### */
-/* #### Code section: module_state ### */
-typedef struct {
- PyObject *__pyx_d;
- PyObject *__pyx_b;
- PyObject *__pyx_cython_runtime;
- PyObject *__pyx_empty_tuple;
- PyObject *__pyx_empty_bytes;
- PyObject *__pyx_empty_unicode;
- #ifdef __Pyx_CyFunction_USED
- PyTypeObject *__pyx_CyFunctionType;
- #endif
- #ifdef __Pyx_FusedFunction_USED
- PyTypeObject *__pyx_FusedFunctionType;
- #endif
- #ifdef __Pyx_Generator_USED
- PyTypeObject *__pyx_GeneratorType;
- #endif
- #ifdef __Pyx_IterableCoroutine_USED
- PyTypeObject *__pyx_IterableCoroutineType;
- #endif
- #ifdef __Pyx_Coroutine_USED
- PyTypeObject *__pyx_CoroutineAwaitType;
- #endif
- #ifdef __Pyx_Coroutine_USED
- PyTypeObject *__pyx_CoroutineType;
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- #endif
- #if CYTHON_USE_MODULE_STATE
- PyObject *__pyx_type___pyx_array;
- PyObject *__pyx_type___pyx_MemviewEnum;
- PyObject *__pyx_type___pyx_memoryview;
- PyObject *__pyx_type___pyx_memoryviewslice;
- #endif
- PyTypeObject *__pyx_array_type;
- PyTypeObject *__pyx_MemviewEnum_type;
- PyTypeObject *__pyx_memoryview_type;
- PyTypeObject *__pyx_memoryviewslice_type;
- PyObject *__pyx_kp_u_;
- PyObject *__pyx_n_s_ASCII;
- PyObject *__pyx_kp_s_All_dimensions_preceding_dimensi;
- PyObject *__pyx_n_s_AssertionError;
- PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
- PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
- PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
- PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
- PyObject *__pyx_kp_u_Cannot_index_with_type;
- PyObject *__pyx_kp_s_Cannot_transpose_memoryview_with;
- PyObject *__pyx_kp_s_Dimension_d_is_not_direct;
- PyObject *__pyx_n_s_Ellipsis;
- PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
- PyObject *__pyx_kp_s_Incompatible_checksums_0x_x_vs_0;
- PyObject *__pyx_n_s_IndexError;
- PyObject *__pyx_kp_s_Index_out_of_bounds_axis_d;
- PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
- PyObject *__pyx_kp_u_Invalid_mode_expected_c_or_fortr;
- PyObject *__pyx_kp_u_Invalid_shape_in_axis;
- PyObject *__pyx_n_s_MemoryError;
- PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
- PyObject *__pyx_kp_s_MemoryView_of_r_object;
- PyObject *__pyx_n_b_O;
- PyObject *__pyx_kp_u_Out_of_bounds_on_buffer_access_a;
- PyObject *__pyx_n_s_PickleError;
- PyObject *__pyx_n_s_Sequence;
- PyObject *__pyx_kp_s_Step_may_not_be_zero_axis_d;
- PyObject *__pyx_n_s_TypeError;
- PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
- PyObject *__pyx_n_s_ValueError;
- PyObject *__pyx_n_s_View_MemoryView;
- PyObject *__pyx_kp_u__2;
- PyObject *__pyx_n_s__23;
- PyObject *__pyx_n_s__3;
- PyObject *__pyx_kp_u__6;
- PyObject *__pyx_kp_u__7;
- PyObject *__pyx_n_s_abc;
- PyObject *__pyx_n_s_allocate_buffer;
- PyObject *__pyx_kp_u_and;
- PyObject *__pyx_n_s_asyncio_coroutines;
- PyObject *__pyx_n_s_base;
- PyObject *__pyx_n_s_c;
- PyObject *__pyx_n_u_c;
- PyObject *__pyx_n_s_class;
- PyObject *__pyx_n_s_class_getitem;
- PyObject *__pyx_n_s_cline_in_traceback;
- PyObject *__pyx_n_s_collections;
- PyObject *__pyx_kp_s_collections_abc;
- PyObject *__pyx_kp_s_contiguous_and_direct;
- PyObject *__pyx_kp_s_contiguous_and_indirect;
- PyObject *__pyx_kp_s_core_pyx;
- PyObject *__pyx_n_s_count;
- PyObject *__pyx_n_s_dict;
- PyObject *__pyx_kp_u_disable;
- PyObject *__pyx_n_s_dtype_is_object;
- PyObject *__pyx_kp_u_enable;
- PyObject *__pyx_n_s_encode;
- PyObject *__pyx_n_s_enumerate;
- PyObject *__pyx_n_s_error;
- PyObject *__pyx_n_s_flags;
- PyObject *__pyx_n_s_format;
- PyObject *__pyx_n_s_fortran;
- PyObject *__pyx_n_u_fortran;
- PyObject *__pyx_kp_u_gc;
- PyObject *__pyx_n_s_getstate;
- PyObject *__pyx_kp_u_got;
- PyObject *__pyx_kp_u_got_differing_extents_in_dimensi;
- PyObject *__pyx_n_s_id;
- PyObject *__pyx_n_s_import;
- PyObject *__pyx_n_s_index;
- PyObject *__pyx_n_s_initializing;
- PyObject *__pyx_n_s_is_coroutine;
- PyObject *__pyx_kp_u_isenabled;
- PyObject *__pyx_n_s_itemsize;
- PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
- PyObject *__pyx_n_s_main;
- PyObject *__pyx_n_s_maximum_path_c;
- PyObject *__pyx_n_s_memview;
- PyObject *__pyx_n_s_mode;
- PyObject *__pyx_n_s_monotonic_align_core;
- PyObject *__pyx_n_s_name;
- PyObject *__pyx_n_s_name_2;
- PyObject *__pyx_n_s_ndim;
- PyObject *__pyx_n_s_new;
- PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
- PyObject *__pyx_n_s_obj;
- PyObject *__pyx_n_s_pack;
- PyObject *__pyx_n_s_paths;
- PyObject *__pyx_n_s_pickle;
- PyObject *__pyx_n_s_pyx_PickleError;
- PyObject *__pyx_n_s_pyx_checksum;
- PyObject *__pyx_n_s_pyx_result;
- PyObject *__pyx_n_s_pyx_state;
- PyObject *__pyx_n_s_pyx_type;
- PyObject *__pyx_n_s_pyx_unpickle_Enum;
- PyObject *__pyx_n_s_pyx_vtable;
- PyObject *__pyx_n_s_range;
- PyObject *__pyx_n_s_reduce;
- PyObject *__pyx_n_s_reduce_cython;
- PyObject *__pyx_n_s_reduce_ex;
- PyObject *__pyx_n_s_register;
- PyObject *__pyx_n_s_setstate;
- PyObject *__pyx_n_s_setstate_cython;
- PyObject *__pyx_n_s_shape;
- PyObject *__pyx_n_s_size;
- PyObject *__pyx_n_s_spec;
- PyObject *__pyx_n_s_start;
- PyObject *__pyx_n_s_step;
- PyObject *__pyx_n_s_stop;
- PyObject *__pyx_kp_s_strided_and_direct;
- PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
- PyObject *__pyx_kp_s_strided_and_indirect;
- PyObject *__pyx_kp_s_stringsource;
- PyObject *__pyx_n_s_struct;
- PyObject *__pyx_n_s_sys;
- PyObject *__pyx_n_s_t_xs;
- PyObject *__pyx_n_s_t_ys;
- PyObject *__pyx_n_s_test;
- PyObject *__pyx_kp_s_unable_to_allocate_array_data;
- PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
- PyObject *__pyx_n_s_unpack;
- PyObject *__pyx_n_s_update;
- PyObject *__pyx_n_s_values;
- PyObject *__pyx_n_s_version_info;
- PyObject *__pyx_int_0;
- PyObject *__pyx_int_1;
- PyObject *__pyx_int_3;
- PyObject *__pyx_int_112105877;
- PyObject *__pyx_int_136983863;
- PyObject *__pyx_int_184977713;
- PyObject *__pyx_int_neg_1;
- float __pyx_k__9;
- PyObject *__pyx_slice__5;
- PyObject *__pyx_tuple__4;
- PyObject *__pyx_tuple__8;
- PyObject *__pyx_tuple__10;
- PyObject *__pyx_tuple__11;
- PyObject *__pyx_tuple__12;
- PyObject *__pyx_tuple__13;
- PyObject *__pyx_tuple__14;
- PyObject *__pyx_tuple__15;
- PyObject *__pyx_tuple__16;
- PyObject *__pyx_tuple__17;
- PyObject *__pyx_tuple__18;
- PyObject *__pyx_tuple__19;
- PyObject *__pyx_tuple__21;
- PyObject *__pyx_codeobj__20;
- PyObject *__pyx_codeobj__22;
-} __pyx_mstate;
-
-#if CYTHON_USE_MODULE_STATE
-#ifdef __cplusplus
-namespace {
- extern struct PyModuleDef __pyx_moduledef;
-} /* anonymous namespace */
-#else
-static struct PyModuleDef __pyx_moduledef;
-#endif
-
-#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o))
-
-#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef)))
-
-#define __pyx_m (PyState_FindModule(&__pyx_moduledef))
-#else
-static __pyx_mstate __pyx_mstate_global_static =
-#ifdef __cplusplus
- {};
-#else
- {0};
-#endif
-static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static;
-#endif
-/* #### Code section: module_state_clear ### */
-#if CYTHON_USE_MODULE_STATE
-static int __pyx_m_clear(PyObject *m) {
- __pyx_mstate *clear_module_state = __pyx_mstate(m);
- if (!clear_module_state) return 0;
- Py_CLEAR(clear_module_state->__pyx_d);
- Py_CLEAR(clear_module_state->__pyx_b);
- Py_CLEAR(clear_module_state->__pyx_cython_runtime);
- Py_CLEAR(clear_module_state->__pyx_empty_tuple);
- Py_CLEAR(clear_module_state->__pyx_empty_bytes);
- Py_CLEAR(clear_module_state->__pyx_empty_unicode);
- #ifdef __Pyx_CyFunction_USED
- Py_CLEAR(clear_module_state->__pyx_CyFunctionType);
- #endif
- #ifdef __Pyx_FusedFunction_USED
- Py_CLEAR(clear_module_state->__pyx_FusedFunctionType);
- #endif
- Py_CLEAR(clear_module_state->__pyx_array_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_array);
- Py_CLEAR(clear_module_state->__pyx_MemviewEnum_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_MemviewEnum);
- Py_CLEAR(clear_module_state->__pyx_memoryview_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryview);
- Py_CLEAR(clear_module_state->__pyx_memoryviewslice_type);
- Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryviewslice);
- Py_CLEAR(clear_module_state->__pyx_kp_u_);
- Py_CLEAR(clear_module_state->__pyx_n_s_ASCII);
- Py_CLEAR(clear_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi);
- Py_CLEAR(clear_module_state->__pyx_n_s_AssertionError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Cannot_index_with_type);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Dimension_d_is_not_direct);
- Py_CLEAR(clear_module_state->__pyx_n_s_Ellipsis);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0);
- Py_CLEAR(clear_module_state->__pyx_n_s_IndexError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_shape_in_axis);
- Py_CLEAR(clear_module_state->__pyx_n_s_MemoryError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x);
- Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_object);
- Py_CLEAR(clear_module_state->__pyx_n_b_O);
- Py_CLEAR(clear_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- Py_CLEAR(clear_module_state->__pyx_n_s_PickleError);
- Py_CLEAR(clear_module_state->__pyx_n_s_Sequence);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d);
- Py_CLEAR(clear_module_state->__pyx_n_s_TypeError);
- Py_CLEAR(clear_module_state->__pyx_kp_s_Unable_to_convert_item_to_object);
- Py_CLEAR(clear_module_state->__pyx_n_s_ValueError);
- Py_CLEAR(clear_module_state->__pyx_n_s_View_MemoryView);
- Py_CLEAR(clear_module_state->__pyx_kp_u__2);
- Py_CLEAR(clear_module_state->__pyx_n_s__23);
- Py_CLEAR(clear_module_state->__pyx_n_s__3);
- Py_CLEAR(clear_module_state->__pyx_kp_u__6);
- Py_CLEAR(clear_module_state->__pyx_kp_u__7);
- Py_CLEAR(clear_module_state->__pyx_n_s_abc);
- Py_CLEAR(clear_module_state->__pyx_n_s_allocate_buffer);
- Py_CLEAR(clear_module_state->__pyx_kp_u_and);
- Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines);
- Py_CLEAR(clear_module_state->__pyx_n_s_base);
- Py_CLEAR(clear_module_state->__pyx_n_s_c);
- Py_CLEAR(clear_module_state->__pyx_n_u_c);
- Py_CLEAR(clear_module_state->__pyx_n_s_class);
- Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem);
- Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback);
- Py_CLEAR(clear_module_state->__pyx_n_s_collections);
- Py_CLEAR(clear_module_state->__pyx_kp_s_collections_abc);
- Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_direct);
- Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_core_pyx);
- Py_CLEAR(clear_module_state->__pyx_n_s_count);
- Py_CLEAR(clear_module_state->__pyx_n_s_dict);
- Py_CLEAR(clear_module_state->__pyx_kp_u_disable);
- Py_CLEAR(clear_module_state->__pyx_n_s_dtype_is_object);
- Py_CLEAR(clear_module_state->__pyx_kp_u_enable);
- Py_CLEAR(clear_module_state->__pyx_n_s_encode);
- Py_CLEAR(clear_module_state->__pyx_n_s_enumerate);
- Py_CLEAR(clear_module_state->__pyx_n_s_error);
- Py_CLEAR(clear_module_state->__pyx_n_s_flags);
- Py_CLEAR(clear_module_state->__pyx_n_s_format);
- Py_CLEAR(clear_module_state->__pyx_n_s_fortran);
- Py_CLEAR(clear_module_state->__pyx_n_u_fortran);
- Py_CLEAR(clear_module_state->__pyx_kp_u_gc);
- Py_CLEAR(clear_module_state->__pyx_n_s_getstate);
- Py_CLEAR(clear_module_state->__pyx_kp_u_got);
- Py_CLEAR(clear_module_state->__pyx_kp_u_got_differing_extents_in_dimensi);
- Py_CLEAR(clear_module_state->__pyx_n_s_id);
- Py_CLEAR(clear_module_state->__pyx_n_s_import);
- Py_CLEAR(clear_module_state->__pyx_n_s_index);
- Py_CLEAR(clear_module_state->__pyx_n_s_initializing);
- Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine);
- Py_CLEAR(clear_module_state->__pyx_kp_u_isenabled);
- Py_CLEAR(clear_module_state->__pyx_n_s_itemsize);
- Py_CLEAR(clear_module_state->__pyx_kp_s_itemsize_0_for_cython_array);
- Py_CLEAR(clear_module_state->__pyx_n_s_main);
- Py_CLEAR(clear_module_state->__pyx_n_s_maximum_path_c);
- Py_CLEAR(clear_module_state->__pyx_n_s_memview);
- Py_CLEAR(clear_module_state->__pyx_n_s_mode);
- Py_CLEAR(clear_module_state->__pyx_n_s_monotonic_align_core);
- Py_CLEAR(clear_module_state->__pyx_n_s_name);
- Py_CLEAR(clear_module_state->__pyx_n_s_name_2);
- Py_CLEAR(clear_module_state->__pyx_n_s_ndim);
- Py_CLEAR(clear_module_state->__pyx_n_s_new);
- Py_CLEAR(clear_module_state->__pyx_kp_s_no_default___reduce___due_to_non);
- Py_CLEAR(clear_module_state->__pyx_n_s_obj);
- Py_CLEAR(clear_module_state->__pyx_n_s_pack);
- Py_CLEAR(clear_module_state->__pyx_n_s_paths);
- Py_CLEAR(clear_module_state->__pyx_n_s_pickle);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_PickleError);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_checksum);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_result);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_state);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_type);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_unpickle_Enum);
- Py_CLEAR(clear_module_state->__pyx_n_s_pyx_vtable);
- Py_CLEAR(clear_module_state->__pyx_n_s_range);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce_cython);
- Py_CLEAR(clear_module_state->__pyx_n_s_reduce_ex);
- Py_CLEAR(clear_module_state->__pyx_n_s_register);
- Py_CLEAR(clear_module_state->__pyx_n_s_setstate);
- Py_CLEAR(clear_module_state->__pyx_n_s_setstate_cython);
- Py_CLEAR(clear_module_state->__pyx_n_s_shape);
- Py_CLEAR(clear_module_state->__pyx_n_s_size);
- Py_CLEAR(clear_module_state->__pyx_n_s_spec);
- Py_CLEAR(clear_module_state->__pyx_n_s_start);
- Py_CLEAR(clear_module_state->__pyx_n_s_step);
- Py_CLEAR(clear_module_state->__pyx_n_s_stop);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct_or_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_indirect);
- Py_CLEAR(clear_module_state->__pyx_kp_s_stringsource);
- Py_CLEAR(clear_module_state->__pyx_n_s_struct);
- Py_CLEAR(clear_module_state->__pyx_n_s_sys);
- Py_CLEAR(clear_module_state->__pyx_n_s_t_xs);
- Py_CLEAR(clear_module_state->__pyx_n_s_t_ys);
- Py_CLEAR(clear_module_state->__pyx_n_s_test);
- Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_array_data);
- Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str);
- Py_CLEAR(clear_module_state->__pyx_n_s_unpack);
- Py_CLEAR(clear_module_state->__pyx_n_s_update);
- Py_CLEAR(clear_module_state->__pyx_n_s_values);
- Py_CLEAR(clear_module_state->__pyx_n_s_version_info);
- Py_CLEAR(clear_module_state->__pyx_int_0);
- Py_CLEAR(clear_module_state->__pyx_int_1);
- Py_CLEAR(clear_module_state->__pyx_int_3);
- Py_CLEAR(clear_module_state->__pyx_int_112105877);
- Py_CLEAR(clear_module_state->__pyx_int_136983863);
- Py_CLEAR(clear_module_state->__pyx_int_184977713);
- Py_CLEAR(clear_module_state->__pyx_int_neg_1);
- Py_CLEAR(clear_module_state->__pyx_slice__5);
- Py_CLEAR(clear_module_state->__pyx_tuple__4);
- Py_CLEAR(clear_module_state->__pyx_tuple__8);
- Py_CLEAR(clear_module_state->__pyx_tuple__10);
- Py_CLEAR(clear_module_state->__pyx_tuple__11);
- Py_CLEAR(clear_module_state->__pyx_tuple__12);
- Py_CLEAR(clear_module_state->__pyx_tuple__13);
- Py_CLEAR(clear_module_state->__pyx_tuple__14);
- Py_CLEAR(clear_module_state->__pyx_tuple__15);
- Py_CLEAR(clear_module_state->__pyx_tuple__16);
- Py_CLEAR(clear_module_state->__pyx_tuple__17);
- Py_CLEAR(clear_module_state->__pyx_tuple__18);
- Py_CLEAR(clear_module_state->__pyx_tuple__19);
- Py_CLEAR(clear_module_state->__pyx_tuple__21);
- Py_CLEAR(clear_module_state->__pyx_codeobj__20);
- Py_CLEAR(clear_module_state->__pyx_codeobj__22);
- return 0;
-}
-#endif
-/* #### Code section: module_state_traverse ### */
-#if CYTHON_USE_MODULE_STATE
-static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) {
- __pyx_mstate *traverse_module_state = __pyx_mstate(m);
- if (!traverse_module_state) return 0;
- Py_VISIT(traverse_module_state->__pyx_d);
- Py_VISIT(traverse_module_state->__pyx_b);
- Py_VISIT(traverse_module_state->__pyx_cython_runtime);
- Py_VISIT(traverse_module_state->__pyx_empty_tuple);
- Py_VISIT(traverse_module_state->__pyx_empty_bytes);
- Py_VISIT(traverse_module_state->__pyx_empty_unicode);
- #ifdef __Pyx_CyFunction_USED
- Py_VISIT(traverse_module_state->__pyx_CyFunctionType);
- #endif
- #ifdef __Pyx_FusedFunction_USED
- Py_VISIT(traverse_module_state->__pyx_FusedFunctionType);
- #endif
- Py_VISIT(traverse_module_state->__pyx_array_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_array);
- Py_VISIT(traverse_module_state->__pyx_MemviewEnum_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_MemviewEnum);
- Py_VISIT(traverse_module_state->__pyx_memoryview_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryview);
- Py_VISIT(traverse_module_state->__pyx_memoryviewslice_type);
- Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryviewslice);
- Py_VISIT(traverse_module_state->__pyx_kp_u_);
- Py_VISIT(traverse_module_state->__pyx_n_s_ASCII);
- Py_VISIT(traverse_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi);
- Py_VISIT(traverse_module_state->__pyx_n_s_AssertionError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Cannot_index_with_type);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Dimension_d_is_not_direct);
- Py_VISIT(traverse_module_state->__pyx_n_s_Ellipsis);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0);
- Py_VISIT(traverse_module_state->__pyx_n_s_IndexError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_shape_in_axis);
- Py_VISIT(traverse_module_state->__pyx_n_s_MemoryError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x);
- Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_object);
- Py_VISIT(traverse_module_state->__pyx_n_b_O);
- Py_VISIT(traverse_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a);
- Py_VISIT(traverse_module_state->__pyx_n_s_PickleError);
- Py_VISIT(traverse_module_state->__pyx_n_s_Sequence);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d);
- Py_VISIT(traverse_module_state->__pyx_n_s_TypeError);
- Py_VISIT(traverse_module_state->__pyx_kp_s_Unable_to_convert_item_to_object);
- Py_VISIT(traverse_module_state->__pyx_n_s_ValueError);
- Py_VISIT(traverse_module_state->__pyx_n_s_View_MemoryView);
- Py_VISIT(traverse_module_state->__pyx_kp_u__2);
- Py_VISIT(traverse_module_state->__pyx_n_s__23);
- Py_VISIT(traverse_module_state->__pyx_n_s__3);
- Py_VISIT(traverse_module_state->__pyx_kp_u__6);
- Py_VISIT(traverse_module_state->__pyx_kp_u__7);
- Py_VISIT(traverse_module_state->__pyx_n_s_abc);
- Py_VISIT(traverse_module_state->__pyx_n_s_allocate_buffer);
- Py_VISIT(traverse_module_state->__pyx_kp_u_and);
- Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines);
- Py_VISIT(traverse_module_state->__pyx_n_s_base);
- Py_VISIT(traverse_module_state->__pyx_n_s_c);
- Py_VISIT(traverse_module_state->__pyx_n_u_c);
- Py_VISIT(traverse_module_state->__pyx_n_s_class);
- Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem);
- Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback);
- Py_VISIT(traverse_module_state->__pyx_n_s_collections);
- Py_VISIT(traverse_module_state->__pyx_kp_s_collections_abc);
- Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_direct);
- Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_core_pyx);
- Py_VISIT(traverse_module_state->__pyx_n_s_count);
- Py_VISIT(traverse_module_state->__pyx_n_s_dict);
- Py_VISIT(traverse_module_state->__pyx_kp_u_disable);
- Py_VISIT(traverse_module_state->__pyx_n_s_dtype_is_object);
- Py_VISIT(traverse_module_state->__pyx_kp_u_enable);
- Py_VISIT(traverse_module_state->__pyx_n_s_encode);
- Py_VISIT(traverse_module_state->__pyx_n_s_enumerate);
- Py_VISIT(traverse_module_state->__pyx_n_s_error);
- Py_VISIT(traverse_module_state->__pyx_n_s_flags);
- Py_VISIT(traverse_module_state->__pyx_n_s_format);
- Py_VISIT(traverse_module_state->__pyx_n_s_fortran);
- Py_VISIT(traverse_module_state->__pyx_n_u_fortran);
- Py_VISIT(traverse_module_state->__pyx_kp_u_gc);
- Py_VISIT(traverse_module_state->__pyx_n_s_getstate);
- Py_VISIT(traverse_module_state->__pyx_kp_u_got);
- Py_VISIT(traverse_module_state->__pyx_kp_u_got_differing_extents_in_dimensi);
- Py_VISIT(traverse_module_state->__pyx_n_s_id);
- Py_VISIT(traverse_module_state->__pyx_n_s_import);
- Py_VISIT(traverse_module_state->__pyx_n_s_index);
- Py_VISIT(traverse_module_state->__pyx_n_s_initializing);
- Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine);
- Py_VISIT(traverse_module_state->__pyx_kp_u_isenabled);
- Py_VISIT(traverse_module_state->__pyx_n_s_itemsize);
- Py_VISIT(traverse_module_state->__pyx_kp_s_itemsize_0_for_cython_array);
- Py_VISIT(traverse_module_state->__pyx_n_s_main);
- Py_VISIT(traverse_module_state->__pyx_n_s_maximum_path_c);
- Py_VISIT(traverse_module_state->__pyx_n_s_memview);
- Py_VISIT(traverse_module_state->__pyx_n_s_mode);
- Py_VISIT(traverse_module_state->__pyx_n_s_monotonic_align_core);
- Py_VISIT(traverse_module_state->__pyx_n_s_name);
- Py_VISIT(traverse_module_state->__pyx_n_s_name_2);
- Py_VISIT(traverse_module_state->__pyx_n_s_ndim);
- Py_VISIT(traverse_module_state->__pyx_n_s_new);
- Py_VISIT(traverse_module_state->__pyx_kp_s_no_default___reduce___due_to_non);
- Py_VISIT(traverse_module_state->__pyx_n_s_obj);
- Py_VISIT(traverse_module_state->__pyx_n_s_pack);
- Py_VISIT(traverse_module_state->__pyx_n_s_paths);
- Py_VISIT(traverse_module_state->__pyx_n_s_pickle);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_PickleError);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_checksum);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_result);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_state);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_type);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_unpickle_Enum);
- Py_VISIT(traverse_module_state->__pyx_n_s_pyx_vtable);
- Py_VISIT(traverse_module_state->__pyx_n_s_range);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce_cython);
- Py_VISIT(traverse_module_state->__pyx_n_s_reduce_ex);
- Py_VISIT(traverse_module_state->__pyx_n_s_register);
- Py_VISIT(traverse_module_state->__pyx_n_s_setstate);
- Py_VISIT(traverse_module_state->__pyx_n_s_setstate_cython);
- Py_VISIT(traverse_module_state->__pyx_n_s_shape);
- Py_VISIT(traverse_module_state->__pyx_n_s_size);
- Py_VISIT(traverse_module_state->__pyx_n_s_spec);
- Py_VISIT(traverse_module_state->__pyx_n_s_start);
- Py_VISIT(traverse_module_state->__pyx_n_s_step);
- Py_VISIT(traverse_module_state->__pyx_n_s_stop);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct_or_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_indirect);
- Py_VISIT(traverse_module_state->__pyx_kp_s_stringsource);
- Py_VISIT(traverse_module_state->__pyx_n_s_struct);
- Py_VISIT(traverse_module_state->__pyx_n_s_sys);
- Py_VISIT(traverse_module_state->__pyx_n_s_t_xs);
- Py_VISIT(traverse_module_state->__pyx_n_s_t_ys);
- Py_VISIT(traverse_module_state->__pyx_n_s_test);
- Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_array_data);
- Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str);
- Py_VISIT(traverse_module_state->__pyx_n_s_unpack);
- Py_VISIT(traverse_module_state->__pyx_n_s_update);
- Py_VISIT(traverse_module_state->__pyx_n_s_values);
- Py_VISIT(traverse_module_state->__pyx_n_s_version_info);
- Py_VISIT(traverse_module_state->__pyx_int_0);
- Py_VISIT(traverse_module_state->__pyx_int_1);
- Py_VISIT(traverse_module_state->__pyx_int_3);
- Py_VISIT(traverse_module_state->__pyx_int_112105877);
- Py_VISIT(traverse_module_state->__pyx_int_136983863);
- Py_VISIT(traverse_module_state->__pyx_int_184977713);
- Py_VISIT(traverse_module_state->__pyx_int_neg_1);
- Py_VISIT(traverse_module_state->__pyx_slice__5);
- Py_VISIT(traverse_module_state->__pyx_tuple__4);
- Py_VISIT(traverse_module_state->__pyx_tuple__8);
- Py_VISIT(traverse_module_state->__pyx_tuple__10);
- Py_VISIT(traverse_module_state->__pyx_tuple__11);
- Py_VISIT(traverse_module_state->__pyx_tuple__12);
- Py_VISIT(traverse_module_state->__pyx_tuple__13);
- Py_VISIT(traverse_module_state->__pyx_tuple__14);
- Py_VISIT(traverse_module_state->__pyx_tuple__15);
- Py_VISIT(traverse_module_state->__pyx_tuple__16);
- Py_VISIT(traverse_module_state->__pyx_tuple__17);
- Py_VISIT(traverse_module_state->__pyx_tuple__18);
- Py_VISIT(traverse_module_state->__pyx_tuple__19);
- Py_VISIT(traverse_module_state->__pyx_tuple__21);
- Py_VISIT(traverse_module_state->__pyx_codeobj__20);
- Py_VISIT(traverse_module_state->__pyx_codeobj__22);
- return 0;
-}
-#endif
-/* #### Code section: module_state_defines ### */
-#define __pyx_d __pyx_mstate_global->__pyx_d
-#define __pyx_b __pyx_mstate_global->__pyx_b
-#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime
-#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple
-#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes
-#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode
-#ifdef __Pyx_CyFunction_USED
-#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType
-#endif
-#ifdef __Pyx_FusedFunction_USED
-#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType
-#endif
-#ifdef __Pyx_Generator_USED
-#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType
-#endif
-#ifdef __Pyx_IterableCoroutine_USED
-#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType
-#endif
-#ifdef __Pyx_Coroutine_USED
-#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType
-#endif
-#ifdef __Pyx_Coroutine_USED
-#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#endif
-#if CYTHON_USE_MODULE_STATE
-#define __pyx_type___pyx_array __pyx_mstate_global->__pyx_type___pyx_array
-#define __pyx_type___pyx_MemviewEnum __pyx_mstate_global->__pyx_type___pyx_MemviewEnum
-#define __pyx_type___pyx_memoryview __pyx_mstate_global->__pyx_type___pyx_memoryview
-#define __pyx_type___pyx_memoryviewslice __pyx_mstate_global->__pyx_type___pyx_memoryviewslice
-#endif
-#define __pyx_array_type __pyx_mstate_global->__pyx_array_type
-#define __pyx_MemviewEnum_type __pyx_mstate_global->__pyx_MemviewEnum_type
-#define __pyx_memoryview_type __pyx_mstate_global->__pyx_memoryview_type
-#define __pyx_memoryviewslice_type __pyx_mstate_global->__pyx_memoryviewslice_type
-#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_
-#define __pyx_n_s_ASCII __pyx_mstate_global->__pyx_n_s_ASCII
-#define __pyx_kp_s_All_dimensions_preceding_dimensi __pyx_mstate_global->__pyx_kp_s_All_dimensions_preceding_dimensi
-#define __pyx_n_s_AssertionError __pyx_mstate_global->__pyx_n_s_AssertionError
-#define __pyx_kp_s_Buffer_view_does_not_expose_stri __pyx_mstate_global->__pyx_kp_s_Buffer_view_does_not_expose_stri
-#define __pyx_kp_s_Can_only_create_a_buffer_that_is __pyx_mstate_global->__pyx_kp_s_Can_only_create_a_buffer_that_is
-#define __pyx_kp_s_Cannot_assign_to_read_only_memor __pyx_mstate_global->__pyx_kp_s_Cannot_assign_to_read_only_memor
-#define __pyx_kp_s_Cannot_create_writable_memory_vi __pyx_mstate_global->__pyx_kp_s_Cannot_create_writable_memory_vi
-#define __pyx_kp_u_Cannot_index_with_type __pyx_mstate_global->__pyx_kp_u_Cannot_index_with_type
-#define __pyx_kp_s_Cannot_transpose_memoryview_with __pyx_mstate_global->__pyx_kp_s_Cannot_transpose_memoryview_with
-#define __pyx_kp_s_Dimension_d_is_not_direct __pyx_mstate_global->__pyx_kp_s_Dimension_d_is_not_direct
-#define __pyx_n_s_Ellipsis __pyx_mstate_global->__pyx_n_s_Ellipsis
-#define __pyx_kp_s_Empty_shape_tuple_for_cython_arr __pyx_mstate_global->__pyx_kp_s_Empty_shape_tuple_for_cython_arr
-#define __pyx_kp_s_Incompatible_checksums_0x_x_vs_0 __pyx_mstate_global->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0
-#define __pyx_n_s_IndexError __pyx_mstate_global->__pyx_n_s_IndexError
-#define __pyx_kp_s_Index_out_of_bounds_axis_d __pyx_mstate_global->__pyx_kp_s_Index_out_of_bounds_axis_d
-#define __pyx_kp_s_Indirect_dimensions_not_supporte __pyx_mstate_global->__pyx_kp_s_Indirect_dimensions_not_supporte
-#define __pyx_kp_u_Invalid_mode_expected_c_or_fortr __pyx_mstate_global->__pyx_kp_u_Invalid_mode_expected_c_or_fortr
-#define __pyx_kp_u_Invalid_shape_in_axis __pyx_mstate_global->__pyx_kp_u_Invalid_shape_in_axis
-#define __pyx_n_s_MemoryError __pyx_mstate_global->__pyx_n_s_MemoryError
-#define __pyx_kp_s_MemoryView_of_r_at_0x_x __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_at_0x_x
-#define __pyx_kp_s_MemoryView_of_r_object __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_object
-#define __pyx_n_b_O __pyx_mstate_global->__pyx_n_b_O
-#define __pyx_kp_u_Out_of_bounds_on_buffer_access_a __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a
-#define __pyx_n_s_PickleError __pyx_mstate_global->__pyx_n_s_PickleError
-#define __pyx_n_s_Sequence __pyx_mstate_global->__pyx_n_s_Sequence
-#define __pyx_kp_s_Step_may_not_be_zero_axis_d __pyx_mstate_global->__pyx_kp_s_Step_may_not_be_zero_axis_d
-#define __pyx_n_s_TypeError __pyx_mstate_global->__pyx_n_s_TypeError
-#define __pyx_kp_s_Unable_to_convert_item_to_object __pyx_mstate_global->__pyx_kp_s_Unable_to_convert_item_to_object
-#define __pyx_n_s_ValueError __pyx_mstate_global->__pyx_n_s_ValueError
-#define __pyx_n_s_View_MemoryView __pyx_mstate_global->__pyx_n_s_View_MemoryView
-#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2
-#define __pyx_n_s__23 __pyx_mstate_global->__pyx_n_s__23
-#define __pyx_n_s__3 __pyx_mstate_global->__pyx_n_s__3
-#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6
-#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7
-#define __pyx_n_s_abc __pyx_mstate_global->__pyx_n_s_abc
-#define __pyx_n_s_allocate_buffer __pyx_mstate_global->__pyx_n_s_allocate_buffer
-#define __pyx_kp_u_and __pyx_mstate_global->__pyx_kp_u_and
-#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines
-#define __pyx_n_s_base __pyx_mstate_global->__pyx_n_s_base
-#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c
-#define __pyx_n_u_c __pyx_mstate_global->__pyx_n_u_c
-#define __pyx_n_s_class __pyx_mstate_global->__pyx_n_s_class
-#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem
-#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback
-#define __pyx_n_s_collections __pyx_mstate_global->__pyx_n_s_collections
-#define __pyx_kp_s_collections_abc __pyx_mstate_global->__pyx_kp_s_collections_abc
-#define __pyx_kp_s_contiguous_and_direct __pyx_mstate_global->__pyx_kp_s_contiguous_and_direct
-#define __pyx_kp_s_contiguous_and_indirect __pyx_mstate_global->__pyx_kp_s_contiguous_and_indirect
-#define __pyx_kp_s_core_pyx __pyx_mstate_global->__pyx_kp_s_core_pyx
-#define __pyx_n_s_count __pyx_mstate_global->__pyx_n_s_count
-#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict
-#define __pyx_kp_u_disable __pyx_mstate_global->__pyx_kp_u_disable
-#define __pyx_n_s_dtype_is_object __pyx_mstate_global->__pyx_n_s_dtype_is_object
-#define __pyx_kp_u_enable __pyx_mstate_global->__pyx_kp_u_enable
-#define __pyx_n_s_encode __pyx_mstate_global->__pyx_n_s_encode
-#define __pyx_n_s_enumerate __pyx_mstate_global->__pyx_n_s_enumerate
-#define __pyx_n_s_error __pyx_mstate_global->__pyx_n_s_error
-#define __pyx_n_s_flags __pyx_mstate_global->__pyx_n_s_flags
-#define __pyx_n_s_format __pyx_mstate_global->__pyx_n_s_format
-#define __pyx_n_s_fortran __pyx_mstate_global->__pyx_n_s_fortran
-#define __pyx_n_u_fortran __pyx_mstate_global->__pyx_n_u_fortran
-#define __pyx_kp_u_gc __pyx_mstate_global->__pyx_kp_u_gc
-#define __pyx_n_s_getstate __pyx_mstate_global->__pyx_n_s_getstate
-#define __pyx_kp_u_got __pyx_mstate_global->__pyx_kp_u_got
-#define __pyx_kp_u_got_differing_extents_in_dimensi __pyx_mstate_global->__pyx_kp_u_got_differing_extents_in_dimensi
-#define __pyx_n_s_id __pyx_mstate_global->__pyx_n_s_id
-#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import
-#define __pyx_n_s_index __pyx_mstate_global->__pyx_n_s_index
-#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing
-#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine
-#define __pyx_kp_u_isenabled __pyx_mstate_global->__pyx_kp_u_isenabled
-#define __pyx_n_s_itemsize __pyx_mstate_global->__pyx_n_s_itemsize
-#define __pyx_kp_s_itemsize_0_for_cython_array __pyx_mstate_global->__pyx_kp_s_itemsize_0_for_cython_array
-#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main
-#define __pyx_n_s_maximum_path_c __pyx_mstate_global->__pyx_n_s_maximum_path_c
-#define __pyx_n_s_memview __pyx_mstate_global->__pyx_n_s_memview
-#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode
-#define __pyx_n_s_monotonic_align_core __pyx_mstate_global->__pyx_n_s_monotonic_align_core
-#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name
-#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2
-#define __pyx_n_s_ndim __pyx_mstate_global->__pyx_n_s_ndim
-#define __pyx_n_s_new __pyx_mstate_global->__pyx_n_s_new
-#define __pyx_kp_s_no_default___reduce___due_to_non __pyx_mstate_global->__pyx_kp_s_no_default___reduce___due_to_non
-#define __pyx_n_s_obj __pyx_mstate_global->__pyx_n_s_obj
-#define __pyx_n_s_pack __pyx_mstate_global->__pyx_n_s_pack
-#define __pyx_n_s_paths __pyx_mstate_global->__pyx_n_s_paths
-#define __pyx_n_s_pickle __pyx_mstate_global->__pyx_n_s_pickle
-#define __pyx_n_s_pyx_PickleError __pyx_mstate_global->__pyx_n_s_pyx_PickleError
-#define __pyx_n_s_pyx_checksum __pyx_mstate_global->__pyx_n_s_pyx_checksum
-#define __pyx_n_s_pyx_result __pyx_mstate_global->__pyx_n_s_pyx_result
-#define __pyx_n_s_pyx_state __pyx_mstate_global->__pyx_n_s_pyx_state
-#define __pyx_n_s_pyx_type __pyx_mstate_global->__pyx_n_s_pyx_type
-#define __pyx_n_s_pyx_unpickle_Enum __pyx_mstate_global->__pyx_n_s_pyx_unpickle_Enum
-#define __pyx_n_s_pyx_vtable __pyx_mstate_global->__pyx_n_s_pyx_vtable
-#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range
-#define __pyx_n_s_reduce __pyx_mstate_global->__pyx_n_s_reduce
-#define __pyx_n_s_reduce_cython __pyx_mstate_global->__pyx_n_s_reduce_cython
-#define __pyx_n_s_reduce_ex __pyx_mstate_global->__pyx_n_s_reduce_ex
-#define __pyx_n_s_register __pyx_mstate_global->__pyx_n_s_register
-#define __pyx_n_s_setstate __pyx_mstate_global->__pyx_n_s_setstate
-#define __pyx_n_s_setstate_cython __pyx_mstate_global->__pyx_n_s_setstate_cython
-#define __pyx_n_s_shape __pyx_mstate_global->__pyx_n_s_shape
-#define __pyx_n_s_size __pyx_mstate_global->__pyx_n_s_size
-#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec
-#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start
-#define __pyx_n_s_step __pyx_mstate_global->__pyx_n_s_step
-#define __pyx_n_s_stop __pyx_mstate_global->__pyx_n_s_stop
-#define __pyx_kp_s_strided_and_direct __pyx_mstate_global->__pyx_kp_s_strided_and_direct
-#define __pyx_kp_s_strided_and_direct_or_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_direct_or_indirect
-#define __pyx_kp_s_strided_and_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_indirect
-#define __pyx_kp_s_stringsource __pyx_mstate_global->__pyx_kp_s_stringsource
-#define __pyx_n_s_struct __pyx_mstate_global->__pyx_n_s_struct
-#define __pyx_n_s_sys __pyx_mstate_global->__pyx_n_s_sys
-#define __pyx_n_s_t_xs __pyx_mstate_global->__pyx_n_s_t_xs
-#define __pyx_n_s_t_ys __pyx_mstate_global->__pyx_n_s_t_ys
-#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test
-#define __pyx_kp_s_unable_to_allocate_array_data __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_array_data
-#define __pyx_kp_s_unable_to_allocate_shape_and_str __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_shape_and_str
-#define __pyx_n_s_unpack __pyx_mstate_global->__pyx_n_s_unpack
-#define __pyx_n_s_update __pyx_mstate_global->__pyx_n_s_update
-#define __pyx_n_s_values __pyx_mstate_global->__pyx_n_s_values
-#define __pyx_n_s_version_info __pyx_mstate_global->__pyx_n_s_version_info
-#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0
-#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1
-#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3
-#define __pyx_int_112105877 __pyx_mstate_global->__pyx_int_112105877
-#define __pyx_int_136983863 __pyx_mstate_global->__pyx_int_136983863
-#define __pyx_int_184977713 __pyx_mstate_global->__pyx_int_184977713
-#define __pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1
-#define __pyx_k__9 __pyx_mstate_global->__pyx_k__9
-#define __pyx_slice__5 __pyx_mstate_global->__pyx_slice__5
-#define __pyx_tuple__4 __pyx_mstate_global->__pyx_tuple__4
-#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8
-#define __pyx_tuple__10 __pyx_mstate_global->__pyx_tuple__10
-#define __pyx_tuple__11 __pyx_mstate_global->__pyx_tuple__11
-#define __pyx_tuple__12 __pyx_mstate_global->__pyx_tuple__12
-#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13
-#define __pyx_tuple__14 __pyx_mstate_global->__pyx_tuple__14
-#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15
-#define __pyx_tuple__16 __pyx_mstate_global->__pyx_tuple__16
-#define __pyx_tuple__17 __pyx_mstate_global->__pyx_tuple__17
-#define __pyx_tuple__18 __pyx_mstate_global->__pyx_tuple__18
-#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19
-#define __pyx_tuple__21 __pyx_mstate_global->__pyx_tuple__21
-#define __pyx_codeobj__20 __pyx_mstate_global->__pyx_codeobj__20
-#define __pyx_codeobj__22 __pyx_mstate_global->__pyx_codeobj__22
-/* #### Code section: module_code ### */
-
-/* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
-/* Python wrapper */
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_shape = 0;
- Py_ssize_t __pyx_v_itemsize;
- PyObject *__pyx_v_format = 0;
- PyObject *__pyx_v_mode = 0;
- int __pyx_v_allocate_buffer;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
- PyObject* values[5] = {0,0,0,0,0};
- values[3] = ((PyObject *)__pyx_n_s_c);
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4);
- CYTHON_FALLTHROUGH;
- case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3);
- CYTHON_FALLTHROUGH;
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- CYTHON_FALLTHROUGH;
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_shape)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- CYTHON_FALLTHROUGH;
- case 1:
- if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_itemsize)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 2:
- if (likely((values[2] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_format)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 3:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_mode);
- if (value) { values[3] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 4:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_allocate_buffer);
- if (value) { values[4] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 131, __pyx_L3_error)
- }
- } else {
- switch (__pyx_nargs) {
- case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4);
- CYTHON_FALLTHROUGH;
- case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3);
- CYTHON_FALLTHROUGH;
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- __pyx_v_shape = ((PyObject*)values[0]);
- __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error)
- __pyx_v_format = values[2];
- __pyx_v_mode = values[3];
- if (values[4]) {
- __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 132, __pyx_L3_error)
- } else {
-
- /* "View.MemoryView":132
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
- * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
- *
- * cdef int idx
- */
- __pyx_v_allocate_buffer = ((int)1);
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, __pyx_nargs); __PYX_ERR(1, 131, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 131, __pyx_L1_error)
- if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
- PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 131, __pyx_L1_error)
- }
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
-
- /* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
- /* function exit code */
- goto __pyx_L0;
- __pyx_L1_error:;
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
- int __pyx_v_idx;
- Py_ssize_t __pyx_v_dim;
- char __pyx_v_order;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- PyObject *__pyx_t_6 = NULL;
- int __pyx_t_7;
- char *__pyx_t_8;
- Py_ssize_t __pyx_t_9;
- Py_UCS4 __pyx_t_10;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__cinit__", 0);
- __Pyx_INCREF(__pyx_v_format);
-
- /* "View.MemoryView":137
- * cdef Py_ssize_t dim
- *
- * self.ndim = len(shape) # <<<<<<<<<<<<<<
- * self.itemsize = itemsize
- *
- */
- if (unlikely(__pyx_v_shape == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
- __PYX_ERR(1, 137, __pyx_L1_error)
- }
- __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 137, __pyx_L1_error)
- __pyx_v_self->ndim = ((int)__pyx_t_1);
-
- /* "View.MemoryView":138
- *
- * self.ndim = len(shape)
- * self.itemsize = itemsize # <<<<<<<<<<<<<<
- *
- * if not self.ndim:
- */
- __pyx_v_self->itemsize = __pyx_v_itemsize;
-
- /* "View.MemoryView":140
- * self.itemsize = itemsize
- *
- * if not self.ndim: # <<<<<<<<<<<<<<
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- */
- __pyx_t_2 = (!(__pyx_v_self->ndim != 0));
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":141
- *
- * if not self.ndim:
- * raise ValueError, "Empty shape tuple for cython.array" # <<<<<<<<<<<<<<
- *
- * if itemsize <= 0:
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Empty_shape_tuple_for_cython_arr, 0, 0);
- __PYX_ERR(1, 141, __pyx_L1_error)
-
- /* "View.MemoryView":140
- * self.itemsize = itemsize
- *
- * if not self.ndim: # <<<<<<<<<<<<<<
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- */
- }
-
- /* "View.MemoryView":143
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- * if itemsize <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- */
- __pyx_t_2 = (__pyx_v_itemsize <= 0);
- if (unlikely(__pyx_t_2)) {
-
- /* "View.MemoryView":144
- *
- * if itemsize <= 0:
- * raise ValueError, "itemsize <= 0 for cython.array" # <<<<<<<<<<<<<<
- *
- * if not isinstance(format, bytes):
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_itemsize_0_for_cython_array, 0, 0);
- __PYX_ERR(1, 144, __pyx_L1_error)
-
- /* "View.MemoryView":143
- * raise ValueError, "Empty shape tuple for cython.array"
- *
- * if itemsize <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- */
- }
-
- /* "View.MemoryView":146
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- */
- __pyx_t_2 = PyBytes_Check(__pyx_v_format);
- __pyx_t_3 = (!__pyx_t_2);
- if (__pyx_t_3) {
-
- /* "View.MemoryView":147
- *
- * if not isinstance(format, bytes):
- * format = format.encode('ASCII') # <<<<<<<<<<<<<<
- * self._format = format # keep a reference to the byte string
- * self.format = self._format
- */
- __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_6 = NULL;
- __pyx_t_7 = 0;
- if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
- __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
- if (likely(__pyx_t_6)) {
- PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
- __Pyx_INCREF(__pyx_t_6);
- __Pyx_INCREF(function);
- __Pyx_DECREF_SET(__pyx_t_5, function);
- __pyx_t_7 = 1;
- }
- }
- {
- PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_n_s_ASCII};
- __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7);
- __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 147, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
- __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_4);
- __pyx_t_4 = 0;
-
- /* "View.MemoryView":146
- * raise ValueError, "itemsize <= 0 for cython.array"
- *
- * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- */
- }
-
- /* "View.MemoryView":148
- * if not isinstance(format, bytes):
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
- * self.format = self._format
- *
- */
- if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_v_format))) __PYX_ERR(1, 148, __pyx_L1_error)
- __pyx_t_4 = __pyx_v_format;
- __Pyx_INCREF(__pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __Pyx_GOTREF(__pyx_v_self->_format);
- __Pyx_DECREF(__pyx_v_self->_format);
- __pyx_v_self->_format = ((PyObject*)__pyx_t_4);
- __pyx_t_4 = 0;
-
- /* "View.MemoryView":149
- * format = format.encode('ASCII')
- * self._format = format # keep a reference to the byte string
- * self.format = self._format # <<<<<<<<<<<<<<
- *
- *
- */
- if (unlikely(__pyx_v_self->_format == Py_None)) {
- PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
- __PYX_ERR(1, 149, __pyx_L1_error)
- }
- __pyx_t_8 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error)
- __pyx_v_self->format = __pyx_t_8;
-
- /* "View.MemoryView":152
- *
- *
- * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
- * self._strides = self._shape + self.ndim
- *
- */
- __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
-
- /* "View.MemoryView":153
- *
- * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
- * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
- *
- * if not self._shape:
- */
- __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
-
- /* "View.MemoryView":155
- * self._strides = self._shape + self.ndim
- *
- * if not self._shape: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate shape and strides."
- *
- */
- __pyx_t_3 = (!(__pyx_v_self->_shape != 0));
- if (unlikely(__pyx_t_3)) {
-
- /* "View.MemoryView":156
- *
- * if not self._shape:
- * raise MemoryError, "unable to allocate shape and strides." # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_shape_and_str, 0, 0);
- __PYX_ERR(1, 156, __pyx_L1_error)
-
- /* "View.MemoryView":155
- * self._strides = self._shape + self.ndim
- *
- * if not self._shape: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate shape and strides."
- *
- */
- }
-
- /* "View.MemoryView":159
- *
- *
- * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- */
- __pyx_t_7 = 0;
- __pyx_t_4 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_4); __pyx_t_1 = 0;
- for (;;) {
- if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(1, 159, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 159, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 159, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_v_dim = __pyx_t_9;
- __pyx_v_idx = __pyx_t_7;
- __pyx_t_7 = (__pyx_t_7 + 1);
-
- /* "View.MemoryView":160
- *
- * for idx, dim in enumerate(shape):
- * if dim <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim
- */
- __pyx_t_3 = (__pyx_v_dim <= 0);
- if (unlikely(__pyx_t_3)) {
-
- /* "View.MemoryView":161
- * for idx, dim in enumerate(shape):
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}." # <<<<<<<<<<<<<<
- * self._shape[idx] = dim
- *
- */
- __pyx_t_5 = PyTuple_New(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_9 = 0;
- __pyx_t_10 = 127;
- __Pyx_INCREF(__pyx_kp_u_Invalid_shape_in_axis);
- __pyx_t_9 += 22;
- __Pyx_GIVEREF(__pyx_kp_u_Invalid_shape_in_axis);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Invalid_shape_in_axis);
- __pyx_t_6 = __Pyx_PyUnicode_From_int(__pyx_v_idx, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6);
- __pyx_t_6 = 0;
- __Pyx_INCREF(__pyx_kp_u_);
- __pyx_t_9 += 2;
- __Pyx_GIVEREF(__pyx_kp_u_);
- PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_);
- __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_6);
- __pyx_t_6 = 0;
- __Pyx_INCREF(__pyx_kp_u__2);
- __pyx_t_9 += 1;
- __Pyx_GIVEREF(__pyx_kp_u__2);
- PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_kp_u__2);
- __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_5, 5, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __PYX_ERR(1, 161, __pyx_L1_error)
-
- /* "View.MemoryView":160
- *
- * for idx, dim in enumerate(shape):
- * if dim <= 0: # <<<<<<<<<<<<<<
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim
- */
- }
-
- /* "View.MemoryView":162
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- * self._shape[idx] = dim # <<<<<<<<<<<<<<
- *
- * cdef char order
- */
- (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
-
- /* "View.MemoryView":159
- *
- *
- * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
- * if dim <= 0:
- * raise ValueError, f"Invalid shape in axis {idx}: {dim}."
- */
- }
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- /* "View.MemoryView":165
- *
- * cdef char order
- * if mode == 'c': # <<<<<<<<<<<<<<
- * order = b'C'
- * self.mode = u'c'
- */
- __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 165, __pyx_L1_error)
- if (__pyx_t_3) {
-
- /* "View.MemoryView":166
- * cdef char order
- * if mode == 'c':
- * order = b'C' # <<<<<<<<<<<<<<
- * self.mode = u'c'
- * elif mode == 'fortran':
- */
- __pyx_v_order = 'C';
-
- /* "View.MemoryView":167
- * if mode == 'c':
- * order = b'C'
- * self.mode = u'c' # <<<<<<<<<<<<<<
- * elif mode == 'fortran':
- * order = b'F'
- */
- __Pyx_INCREF(__pyx_n_u_c);
- __Pyx_GIVEREF(__pyx_n_u_c);
- __Pyx_GOTREF(__pyx_v_self->mode);
- __Pyx_DECREF(__pyx_v_self->mode);
- __pyx_v_self->mode = __pyx_n_u_c;
-
- /* "View.MemoryView":165
- *
- * cdef char order
- * if mode == 'c': # <<<<<<<<<<<<<<
- * order = b'C'
- * self.mode = u'c'
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":168
- * order = b'C'
- * self.mode = u'c'
- * elif mode == 'fortran': # <<<<<<<<<<<<<<
- * order = b'F'
- * self.mode = u'fortran'
- */
- __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 168, __pyx_L1_error)
- if (likely(__pyx_t_3)) {
-
- /* "View.MemoryView":169
- * self.mode = u'c'
- * elif mode == 'fortran':
- * order = b'F' # <<<<<<<<<<<<<<
- * self.mode = u'fortran'
- * else:
- */
- __pyx_v_order = 'F';
-
- /* "View.MemoryView":170
- * elif mode == 'fortran':
- * order = b'F'
- * self.mode = u'fortran' # <<<<<<<<<<<<<<
- * else:
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
- */
- __Pyx_INCREF(__pyx_n_u_fortran);
- __Pyx_GIVEREF(__pyx_n_u_fortran);
- __Pyx_GOTREF(__pyx_v_self->mode);
- __Pyx_DECREF(__pyx_v_self->mode);
- __pyx_v_self->mode = __pyx_n_u_fortran;
-
- /* "View.MemoryView":168
- * order = b'C'
- * self.mode = u'c'
- * elif mode == 'fortran': # <<<<<<<<<<<<<<
- * order = b'F'
- * self.mode = u'fortran'
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":172
- * self.mode = u'fortran'
- * else:
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" # <<<<<<<<<<<<<<
- *
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
- */
- /*else*/ {
- __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_mode, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 172, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 172, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __PYX_ERR(1, 172, __pyx_L1_error)
- }
- __pyx_L11:;
-
- /* "View.MemoryView":174
- * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
- *
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) # <<<<<<<<<<<<<<
- *
- * self.free_data = allocate_buffer
- */
- __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
-
- /* "View.MemoryView":176
- * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
- *
- * self.free_data = allocate_buffer # <<<<<<<<<<<<<<
- * self.dtype_is_object = format == b'O'
- *
- */
- __pyx_v_self->free_data = __pyx_v_allocate_buffer;
-
- /* "View.MemoryView":177
- *
- * self.free_data = allocate_buffer
- * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
- *
- * if allocate_buffer:
- */
- __pyx_t_6 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 177, __pyx_L1_error)
- __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 177, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_v_self->dtype_is_object = __pyx_t_3;
-
- /* "View.MemoryView":179
- * self.dtype_is_object = format == b'O'
- *
- * if allocate_buffer: # <<<<<<<<<<<<<<
- * _allocate_buffer(self)
- *
- */
- if (__pyx_v_allocate_buffer) {
-
- /* "View.MemoryView":180
- *
- * if allocate_buffer:
- * _allocate_buffer(self) # <<<<<<<<<<<<<<
- *
- * @cname('getbuffer')
- */
- __pyx_t_7 = __pyx_array_allocate_buffer(__pyx_v_self); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 180, __pyx_L1_error)
-
- /* "View.MemoryView":179
- * self.dtype_is_object = format == b'O'
- *
- * if allocate_buffer: # <<<<<<<<<<<<<<
- * _allocate_buffer(self)
- *
- */
- }
-
- /* "View.MemoryView":131
- * cdef bint dtype_is_object
- *
- * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
- * mode="c", bint allocate_buffer=True):
- *
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_6);
- __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_format);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":182
- * _allocate_buffer(self)
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- */
-
-/* Python wrapper */
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
- int __pyx_v_bufmode;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- char *__pyx_t_2;
- Py_ssize_t __pyx_t_3;
- int __pyx_t_4;
- Py_ssize_t *__pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- if (unlikely(__pyx_v_info == NULL)) {
- PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
- return -1;
- }
- __Pyx_RefNannySetupContext("__getbuffer__", 0);
- __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(__pyx_v_info->obj);
-
- /* "View.MemoryView":184
- * @cname('getbuffer')
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1 # <<<<<<<<<<<<<<
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c":
- */
- __pyx_v_bufmode = -1;
-
- /* "View.MemoryView":185
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<<
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- __pyx_t_1 = ((__pyx_v_flags & ((PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS) | PyBUF_ANY_CONTIGUOUS)) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":186
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- */
- __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 186, __pyx_L1_error)
- if (__pyx_t_1) {
-
- /* "View.MemoryView":187
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
- /* "View.MemoryView":186
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS):
- * if self.mode == u"c": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- */
- goto __pyx_L4;
- }
-
- /* "View.MemoryView":188
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- */
- __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 188, __pyx_L1_error)
- if (__pyx_t_1) {
-
- /* "View.MemoryView":189
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- */
- __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
-
- /* "View.MemoryView":188
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- */
- }
- __pyx_L4:;
-
- /* "View.MemoryView":190
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode): # <<<<<<<<<<<<<<
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- */
- __pyx_t_1 = (!((__pyx_v_flags & __pyx_v_bufmode) != 0));
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":191
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory." # <<<<<<<<<<<<<<
- * info.buf = self.data
- * info.len = self.len
- */
- __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Can_only_create_a_buffer_that_is, 0, 0);
- __PYX_ERR(1, 191, __pyx_L1_error)
-
- /* "View.MemoryView":190
- * elif self.mode == u"fortran":
- * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- * if not (flags & bufmode): # <<<<<<<<<<<<<<
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- */
- }
-
- /* "View.MemoryView":185
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<<
- * if self.mode == u"c":
- * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
- */
- }
-
- /* "View.MemoryView":192
- * if not (flags & bufmode):
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data # <<<<<<<<<<<<<<
- * info.len = self.len
- *
- */
- __pyx_t_2 = __pyx_v_self->data;
- __pyx_v_info->buf = __pyx_t_2;
-
- /* "View.MemoryView":193
- * raise ValueError, "Can only create a buffer that is contiguous in memory."
- * info.buf = self.data
- * info.len = self.len # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_STRIDES:
- */
- __pyx_t_3 = __pyx_v_self->len;
- __pyx_v_info->len = __pyx_t_3;
-
- /* "View.MemoryView":195
- * info.len = self.len
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.ndim = self.ndim
- * info.shape = self._shape
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":196
- *
- * if flags & PyBUF_STRIDES:
- * info.ndim = self.ndim # <<<<<<<<<<<<<<
- * info.shape = self._shape
- * info.strides = self._strides
- */
- __pyx_t_4 = __pyx_v_self->ndim;
- __pyx_v_info->ndim = __pyx_t_4;
-
- /* "View.MemoryView":197
- * if flags & PyBUF_STRIDES:
- * info.ndim = self.ndim
- * info.shape = self._shape # <<<<<<<<<<<<<<
- * info.strides = self._strides
- * else:
- */
- __pyx_t_5 = __pyx_v_self->_shape;
- __pyx_v_info->shape = __pyx_t_5;
-
- /* "View.MemoryView":198
- * info.ndim = self.ndim
- * info.shape = self._shape
- * info.strides = self._strides # <<<<<<<<<<<<<<
- * else:
- * info.ndim = 1
- */
- __pyx_t_5 = __pyx_v_self->_strides;
- __pyx_v_info->strides = __pyx_t_5;
-
- /* "View.MemoryView":195
- * info.len = self.len
- *
- * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
- * info.ndim = self.ndim
- * info.shape = self._shape
- */
- goto __pyx_L6;
- }
-
- /* "View.MemoryView":200
- * info.strides = self._strides
- * else:
- * info.ndim = 1 # <<<<<<<<<<<<<<
- * info.shape = &self.len if flags & PyBUF_ND else NULL
- * info.strides = NULL
- */
- /*else*/ {
- __pyx_v_info->ndim = 1;
-
- /* "View.MemoryView":201
- * else:
- * info.ndim = 1
- * info.shape = &self.len if flags & PyBUF_ND else NULL # <<<<<<<<<<<<<<
- * info.strides = NULL
- *
- */
- if (((__pyx_v_flags & PyBUF_ND) != 0)) {
- __pyx_t_5 = (&__pyx_v_self->len);
- } else {
- __pyx_t_5 = NULL;
- }
- __pyx_v_info->shape = __pyx_t_5;
-
- /* "View.MemoryView":202
- * info.ndim = 1
- * info.shape = &self.len if flags & PyBUF_ND else NULL
- * info.strides = NULL # <<<<<<<<<<<<<<
- *
- * info.suboffsets = NULL
- */
- __pyx_v_info->strides = NULL;
- }
- __pyx_L6:;
-
- /* "View.MemoryView":204
- * info.strides = NULL
- *
- * info.suboffsets = NULL # <<<<<<<<<<<<<<
- * info.itemsize = self.itemsize
- * info.readonly = 0
- */
- __pyx_v_info->suboffsets = NULL;
-
- /* "View.MemoryView":205
- *
- * info.suboffsets = NULL
- * info.itemsize = self.itemsize # <<<<<<<<<<<<<<
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- */
- __pyx_t_3 = __pyx_v_self->itemsize;
- __pyx_v_info->itemsize = __pyx_t_3;
-
- /* "View.MemoryView":206
- * info.suboffsets = NULL
- * info.itemsize = self.itemsize
- * info.readonly = 0 # <<<<<<<<<<<<<<
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- * info.obj = self
- */
- __pyx_v_info->readonly = 0;
-
- /* "View.MemoryView":207
- * info.itemsize = self.itemsize
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL # <<<<<<<<<<<<<<
- * info.obj = self
- *
- */
- if (((__pyx_v_flags & PyBUF_FORMAT) != 0)) {
- __pyx_t_2 = __pyx_v_self->format;
- } else {
- __pyx_t_2 = NULL;
- }
- __pyx_v_info->format = __pyx_t_2;
-
- /* "View.MemoryView":208
- * info.readonly = 0
- * info.format = self.format if flags & PyBUF_FORMAT else NULL
- * info.obj = self # <<<<<<<<<<<<<<
- *
- * def __dealloc__(array self):
- */
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_GIVEREF((PyObject *)__pyx_v_self);
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj);
- __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
-
- /* "View.MemoryView":182
- * _allocate_buffer(self)
- *
- * @cname('getbuffer') # <<<<<<<<<<<<<<
- * def __getbuffer__(self, Py_buffer *info, int flags):
- * cdef int bufmode = -1
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- if (__pyx_v_info->obj != NULL) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- goto __pyx_L2;
- __pyx_L0:;
- if (__pyx_v_info->obj == Py_None) {
- __Pyx_GOTREF(__pyx_v_info->obj);
- __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
- }
- __pyx_L2:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":210
- * info.obj = self
- *
- * def __dealloc__(array self): # <<<<<<<<<<<<<<
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- */
-
-/* Python wrapper */
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
-static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
- __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- __Pyx_RefNannySetupContext("__dealloc__", 0);
-
- /* "View.MemoryView":211
- *
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- */
- __pyx_t_1 = (__pyx_v_self->callback_free_data != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":212
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data) # <<<<<<<<<<<<<<
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object:
- */
- __pyx_v_self->callback_free_data(__pyx_v_self->data);
-
- /* "View.MemoryView":211
- *
- * def __dealloc__(array self):
- * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":213
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<<
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- */
- if (__pyx_v_self->free_data) {
- } else {
- __pyx_t_1 = __pyx_v_self->free_data;
- goto __pyx_L4_bool_binop_done;
- }
- __pyx_t_2 = (__pyx_v_self->data != NULL);
- __pyx_t_1 = __pyx_t_2;
- __pyx_L4_bool_binop_done:;
- if (__pyx_t_1) {
-
- /* "View.MemoryView":214
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- */
- if (__pyx_v_self->dtype_is_object) {
-
- /* "View.MemoryView":215
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) # <<<<<<<<<<<<<<
- * free(self.data)
- * PyObject_Free(self._shape)
- */
- __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
-
- /* "View.MemoryView":214
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL:
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- */
- }
-
- /* "View.MemoryView":216
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data) # <<<<<<<<<<<<<<
- * PyObject_Free(self._shape)
- *
- */
- free(__pyx_v_self->data);
-
- /* "View.MemoryView":213
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<<
- * if self.dtype_is_object:
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- */
- }
- __pyx_L3:;
-
- /* "View.MemoryView":217
- * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False)
- * free(self.data)
- * PyObject_Free(self._shape) # <<<<<<<<<<<<<<
- *
- * @property
- */
- PyObject_Free(__pyx_v_self->_shape);
-
- /* "View.MemoryView":210
- * info.obj = self
- *
- * def __dealloc__(array self): # <<<<<<<<<<<<<<
- * if self.callback_free_data != NULL:
- * self.callback_free_data(self.data)
- */
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
-}
-
-/* "View.MemoryView":219
- * PyObject_Free(self._shape)
- *
- * @property # <<<<<<<<<<<<<<
- * def memview(self):
- * return self.get_memview()
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
- __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__get__", 0);
-
- /* "View.MemoryView":221
- * @property
- * def memview(self):
- * return self.get_memview() # <<<<<<<<<<<<<<
- *
- * @cname('get_memview')
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":219
- * PyObject_Free(self._shape)
- *
- * @property # <<<<<<<<<<<<<<
- * def memview(self):
- * return self.get_memview()
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":224
- *
- * @cname('get_memview')
- * cdef get_memview(self): # <<<<<<<<<<<<<<
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object)
- */
-
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
- int __pyx_v_flags;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("get_memview", 0);
-
- /* "View.MemoryView":225
- * @cname('get_memview')
- * cdef get_memview(self):
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
- * return memoryview(self, flags, self.dtype_is_object)
- *
- */
- __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
-
- /* "View.MemoryView":226
- * cdef get_memview(self):
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
- *
- * def __len__(self):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_GIVEREF((PyObject *)__pyx_v_self);
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
- __pyx_t_1 = 0;
- __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":224
- *
- * @cname('get_memview')
- * cdef get_memview(self): # <<<<<<<<<<<<<<
- * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
- * return memoryview(self, flags, self.dtype_is_object)
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":228
- * return memoryview(self, flags, self.dtype_is_object)
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * return self._shape[0]
- *
- */
-
-/* Python wrapper */
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
-static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
- Py_ssize_t __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__len__", 0);
-
- /* "View.MemoryView":229
- *
- * def __len__(self):
- * return self._shape[0] # <<<<<<<<<<<<<<
- *
- * def __getattr__(self, attr):
- */
- __pyx_r = (__pyx_v_self->_shape[0]);
- goto __pyx_L0;
-
- /* "View.MemoryView":228
- * return memoryview(self, flags, self.dtype_is_object)
- *
- * def __len__(self): # <<<<<<<<<<<<<<
- * return self._shape[0]
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":231
- * return self._shape[0]
- *
- * def __getattr__(self, attr): # <<<<<<<<<<<<<<
- * return getattr(self.memview, attr)
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
-static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__getattr__", 0);
-
- /* "View.MemoryView":232
- *
- * def __getattr__(self, attr):
- * return getattr(self.memview, attr) # <<<<<<<<<<<<<<
- *
- * def __getitem__(self, item):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":231
- * return self._shape[0]
- *
- * def __getattr__(self, attr): # <<<<<<<<<<<<<<
- * return getattr(self.memview, attr)
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":234
- * return getattr(self.memview, attr)
- *
- * def __getitem__(self, item): # <<<<<<<<<<<<<<
- * return self.memview[item]
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
-static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__getitem__", 0);
-
- /* "View.MemoryView":235
- *
- * def __getitem__(self, item):
- * return self.memview[item] # <<<<<<<<<<<<<<
- *
- * def __setitem__(self, item, value):
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":234
- * return getattr(self.memview, attr)
- *
- * def __getitem__(self, item): # <<<<<<<<<<<<<<
- * return self.memview[item]
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":237
- * return self.memview[item]
- *
- * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
- * self.memview[item] = value
- *
- */
-
-/* Python wrapper */
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
- __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setitem__", 0);
-
- /* "View.MemoryView":238
- *
- * def __setitem__(self, item, value):
- * self.memview[item] = value # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- if (unlikely((PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0))) __PYX_ERR(1, 238, __pyx_L1_error)
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "View.MemoryView":237
- * return self.memview[item]
- *
- * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
- * self.memview[item] = value
- *
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":2
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 2, __pyx_L1_error)
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":4
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<<
- */
- __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0);
- __PYX_ERR(1, 4, __pyx_L1_error)
-
- /* "(tree fragment)":3
- * def __reduce_cython__(self):
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":248
- *
- * @cname("__pyx_array_allocate_buffer")
- * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<<
- *
- *
- */
-
-static int __pyx_array_allocate_buffer(struct __pyx_array_obj *__pyx_v_self) {
- Py_ssize_t __pyx_v_i;
- PyObject **__pyx_v_p;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- Py_ssize_t __pyx_t_2;
- Py_ssize_t __pyx_t_3;
- Py_ssize_t __pyx_t_4;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("_allocate_buffer", 0);
-
- /* "View.MemoryView":254
- * cdef PyObject **p
- *
- * self.free_data = True # <<<<<<<<<<<<<<
- * self.data = malloc(self.len)
- * if not self.data:
- */
- __pyx_v_self->free_data = 1;
-
- /* "View.MemoryView":255
- *
- * self.free_data = True
- * self.data = malloc(self.len) # <<<<<<<<<<<<<<
- * if not self.data:
- * raise MemoryError, "unable to allocate array data."
- */
- __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
-
- /* "View.MemoryView":256
- * self.free_data = True
- * self.data = malloc(self.len)
- * if not self.data: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate array data."
- *
- */
- __pyx_t_1 = (!(__pyx_v_self->data != 0));
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":257
- * self.data = malloc(self.len)
- * if not self.data:
- * raise MemoryError, "unable to allocate array data." # <<<<<<<<<<<<<<
- *
- * if self.dtype_is_object:
- */
- __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_array_data, 0, 0);
- __PYX_ERR(1, 257, __pyx_L1_error)
-
- /* "View.MemoryView":256
- * self.free_data = True
- * self.data = malloc(self.len)
- * if not self.data: # <<<<<<<<<<<<<<
- * raise MemoryError, "unable to allocate array data."
- *
- */
- }
-
- /* "View.MemoryView":259
- * raise MemoryError, "unable to allocate array data."
- *
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * p = self.data
- * for i in range(self.len // self.itemsize):
- */
- if (__pyx_v_self->dtype_is_object) {
-
- /* "View.MemoryView":260
- *
- * if self.dtype_is_object:
- * p = self.data # <<<<<<<<<<<<<<
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None
- */
- __pyx_v_p = ((PyObject **)__pyx_v_self->data);
-
- /* "View.MemoryView":261
- * if self.dtype_is_object:
- * p = self.data
- * for i in range(self.len // self.itemsize): # <<<<<<<<<<<<<<
- * p[i] = Py_None
- * Py_INCREF(Py_None)
- */
- if (unlikely(__pyx_v_self->itemsize == 0)) {
- PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
- __PYX_ERR(1, 261, __pyx_L1_error)
- }
- else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_self->itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
- PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
- __PYX_ERR(1, 261, __pyx_L1_error)
- }
- __pyx_t_2 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_self->itemsize);
- __pyx_t_3 = __pyx_t_2;
- for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
- __pyx_v_i = __pyx_t_4;
-
- /* "View.MemoryView":262
- * p = self.data
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None # <<<<<<<<<<<<<<
- * Py_INCREF(Py_None)
- * return 0
- */
- (__pyx_v_p[__pyx_v_i]) = Py_None;
-
- /* "View.MemoryView":263
- * for i in range(self.len // self.itemsize):
- * p[i] = Py_None
- * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
- * return 0
- *
- */
- Py_INCREF(Py_None);
- }
-
- /* "View.MemoryView":259
- * raise MemoryError, "unable to allocate array data."
- *
- * if self.dtype_is_object: # <<<<<<<<<<<<<<
- * p = self.data
- * for i in range(self.len // self.itemsize):
- */
- }
-
- /* "View.MemoryView":264
- * p[i] = Py_None
- * Py_INCREF(Py_None)
- * return 0 # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = 0;
- goto __pyx_L0;
-
- /* "View.MemoryView":248
- *
- * @cname("__pyx_array_allocate_buffer")
- * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<<
- *
- *
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView._allocate_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":268
- *
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<<
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- */
-
-static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_c_mode, char *__pyx_v_buf) {
- struct __pyx_array_obj *__pyx_v_result = 0;
- PyObject *__pyx_v_mode = 0;
- struct __pyx_array_obj *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("array_cwrapper", 0);
-
- /* "View.MemoryView":270
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf):
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. # <<<<<<<<<<<<<<
- *
- * if buf is NULL:
- */
- if (((__pyx_v_c_mode[0]) == 'f')) {
- __Pyx_INCREF(__pyx_n_s_fortran);
- __pyx_t_1 = __pyx_n_s_fortran;
- } else {
- __Pyx_INCREF(__pyx_n_s_c);
- __pyx_t_1 = __pyx_n_s_c;
- }
- __pyx_v_mode = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "View.MemoryView":272
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- *
- * if buf is NULL: # <<<<<<<<<<<<<<
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- */
- __pyx_t_2 = (__pyx_v_buf == NULL);
- if (__pyx_t_2) {
-
- /* "View.MemoryView":273
- *
- * if buf is NULL:
- * result = array.__new__(array, shape, itemsize, format, mode) # <<<<<<<<<<<<<<
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
- */
- __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_shape);
- __Pyx_GIVEREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
- __Pyx_INCREF(__pyx_v_mode);
- __Pyx_GIVEREF(__pyx_v_mode);
- PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_mode);
- __pyx_t_1 = 0;
- __pyx_t_3 = 0;
- __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error)
- __Pyx_GOTREF((PyObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":272
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- *
- * if buf is NULL: # <<<<<<<<<<<<<<
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":275
- * result = array.__new__(array, shape, itemsize, format, mode)
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) # <<<<<<<<<<<<<<
- * result.data = buf
- *
- */
- /*else*/ {
- __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_shape);
- __Pyx_GIVEREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4);
- __Pyx_INCREF(__pyx_v_mode);
- __Pyx_GIVEREF(__pyx_v_mode);
- PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_mode);
- __pyx_t_3 = 0;
- __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 275, __pyx_L1_error)
- __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_1, __pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error)
- __Pyx_GOTREF((PyObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3);
- __pyx_t_3 = 0;
-
- /* "View.MemoryView":276
- * else:
- * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False)
- * result.data = buf # <<<<<<<<<<<<<<
- *
- * return result
- */
- __pyx_v_result->data = __pyx_v_buf;
- }
- __pyx_L3:;
-
- /* "View.MemoryView":278
- * result.data = buf
- *
- * return result # <<<<<<<<<<<<<<
- *
- *
- */
- __Pyx_XDECREF((PyObject *)__pyx_r);
- __Pyx_INCREF((PyObject *)__pyx_v_result);
- __pyx_r = __pyx_v_result;
- goto __pyx_L0;
-
- /* "View.MemoryView":268
- *
- * @cname("__pyx_array_new")
- * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<<
- * cdef array result
- * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_XDECREF((PyObject *)__pyx_v_result);
- __Pyx_XDECREF(__pyx_v_mode);
- __Pyx_XGIVEREF((PyObject *)__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":304
- * cdef class Enum(object):
- * cdef object name
- * def __init__(self, name): # <<<<<<<<<<<<<<
- * self.name = name
- * def __repr__(self):
- */
-
-/* Python wrapper */
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_name = 0;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 304, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(1, 304, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- }
- __pyx_v_name = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 304, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__init__", 0);
-
- /* "View.MemoryView":305
- * cdef object name
- * def __init__(self, name):
- * self.name = name # <<<<<<<<<<<<<<
- * def __repr__(self):
- * return self.name
- */
- __Pyx_INCREF(__pyx_v_name);
- __Pyx_GIVEREF(__pyx_v_name);
- __Pyx_GOTREF(__pyx_v_self->name);
- __Pyx_DECREF(__pyx_v_self->name);
- __pyx_v_self->name = __pyx_v_name;
-
- /* "View.MemoryView":304
- * cdef class Enum(object):
- * cdef object name
- * def __init__(self, name): # <<<<<<<<<<<<<<
- * self.name = name
- * def __repr__(self):
- */
-
- /* function exit code */
- __pyx_r = 0;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":306
- * def __init__(self, name):
- * self.name = name
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return self.name
- *
- */
-
-/* Python wrapper */
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
- __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__repr__", 0);
-
- /* "View.MemoryView":307
- * self.name = name
- * def __repr__(self):
- * return self.name # <<<<<<<<<<<<<<
- *
- * cdef generic = Enum("")
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(__pyx_v_self->name);
- __pyx_r = __pyx_v_self->name;
- goto __pyx_L0;
-
- /* "View.MemoryView":306
- * def __init__(self, name):
- * self.name = name
- * def __repr__(self): # <<<<<<<<<<<<<<
- * return self.name
- *
- */
-
- /* function exit code */
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * cdef tuple state
- * cdef object _dict
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
- if (unlikely(__pyx_nargs > 0)) {
- __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
- if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL;
- __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
- PyObject *__pyx_v_state = 0;
- PyObject *__pyx_v__dict = 0;
- int __pyx_v_use_setstate;
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__reduce_cython__", 0);
-
- /* "(tree fragment)":5
- * cdef object _dict
- * cdef bint use_setstate
- * state = (self.name,) # <<<<<<<<<<<<<<
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None:
- */
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_self->name);
- __Pyx_GIVEREF(__pyx_v_self->name);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
- __pyx_v_state = ((PyObject*)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "(tree fragment)":6
- * cdef bint use_setstate
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
- * if _dict is not None:
- * state += (_dict,)
- */
- __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_v__dict = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "(tree fragment)":7
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None: # <<<<<<<<<<<<<<
- * state += (_dict,)
- * use_setstate = True
- */
- __pyx_t_2 = (__pyx_v__dict != Py_None);
- if (__pyx_t_2) {
-
- /* "(tree fragment)":8
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None:
- * state += (_dict,) # <<<<<<<<<<<<<<
- * use_setstate = True
- * else:
- */
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v__dict);
- __Pyx_GIVEREF(__pyx_v__dict);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
- __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_3));
- __pyx_t_3 = 0;
-
- /* "(tree fragment)":9
- * if _dict is not None:
- * state += (_dict,)
- * use_setstate = True # <<<<<<<<<<<<<<
- * else:
- * use_setstate = self.name is not None
- */
- __pyx_v_use_setstate = 1;
-
- /* "(tree fragment)":7
- * state = (self.name,)
- * _dict = getattr(self, '__dict__', None)
- * if _dict is not None: # <<<<<<<<<<<<<<
- * state += (_dict,)
- * use_setstate = True
- */
- goto __pyx_L3;
- }
-
- /* "(tree fragment)":11
- * use_setstate = True
- * else:
- * use_setstate = self.name is not None # <<<<<<<<<<<<<<
- * if use_setstate:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- */
- /*else*/ {
- __pyx_t_2 = (__pyx_v_self->name != Py_None);
- __pyx_v_use_setstate = __pyx_t_2;
- }
- __pyx_L3:;
-
- /* "(tree fragment)":12
- * else:
- * use_setstate = self.name is not None
- * if use_setstate: # <<<<<<<<<<<<<<
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- */
- if (__pyx_v_use_setstate) {
-
- /* "(tree fragment)":13
- * use_setstate = self.name is not None
- * if use_setstate:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state # <<<<<<<<<<<<<<
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- */
- __Pyx_XDECREF(__pyx_r);
- __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_INCREF(__pyx_int_136983863);
- __Pyx_GIVEREF(__pyx_int_136983863);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863);
- __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
- __Pyx_INCREF(__pyx_v_state);
- __Pyx_GIVEREF(__pyx_v_state);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_state);
- __pyx_t_3 = 0;
- __pyx_t_1 = 0;
- __pyx_r = __pyx_t_4;
- __pyx_t_4 = 0;
- goto __pyx_L0;
-
- /* "(tree fragment)":12
- * else:
- * use_setstate = self.name is not None
- * if use_setstate: # <<<<<<<<<<<<<<
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- */
- }
-
- /* "(tree fragment)":15
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) # <<<<<<<<<<<<<<
- * def __setstate_cython__(self, __pyx_state):
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
- /*else*/ {
- __Pyx_XDECREF(__pyx_r);
- __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
- __Pyx_INCREF(__pyx_int_136983863);
- __Pyx_GIVEREF(__pyx_int_136983863);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863);
- __Pyx_INCREF(__pyx_v_state);
- __Pyx_GIVEREF(__pyx_v_state);
- PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
- __pyx_t_4 = 0;
- __pyx_t_1 = 0;
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
- goto __pyx_L0;
- }
-
- /* "(tree fragment)":1
- * def __reduce_cython__(self): # <<<<<<<<<<<<<<
- * cdef tuple state
- * cdef object _dict
- */
-
- /* function exit code */
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XDECREF(__pyx_v_state);
- __Pyx_XDECREF(__pyx_v__dict);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "(tree fragment)":16
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
-/* Python wrapper */
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-); /*proto*/
-static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self,
-#if CYTHON_METH_FASTCALL
-PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
-#else
-PyObject *__pyx_args, PyObject *__pyx_kwds
-#endif
-) {
- PyObject *__pyx_v___pyx_state = 0;
- #if !CYTHON_METH_FASTCALL
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- #endif
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- PyObject *__pyx_r = 0;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0};
- PyObject* values[1] = {0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
- }
- } else if (unlikely(__pyx_nargs != 1)) {
- goto __pyx_L5_argtuple_error;
- } else {
- values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
- }
- __pyx_v___pyx_state = values[0];
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v___pyx_state);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannyDeclarations
- PyObject *__pyx_t_1 = NULL;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__setstate_cython__", 0);
-
- /* "(tree fragment)":17
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state):
- * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
- */
- if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 17, __pyx_L1_error)
- __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "(tree fragment)":16
- * else:
- * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state)
- * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
- * __pyx_unpickle_Enum__set_state(self, __pyx_state)
- */
-
- /* function exit code */
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":349
- * cdef __Pyx_TypeInfo *typeinfo
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
- * self.obj = obj
- * self.flags = flags
- */
-
-/* Python wrapper */
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_obj = 0;
- int __pyx_v_flags;
- int __pyx_v_dtype_is_object;
- CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
- {
- PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
- PyObject* values[3] = {0,0,0};
- if (__pyx_kwds) {
- Py_ssize_t kw_args;
- switch (__pyx_nargs) {
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- CYTHON_FALLTHROUGH;
- case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- CYTHON_FALLTHROUGH;
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds);
- switch (__pyx_nargs) {
- case 0:
- if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_obj)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- else goto __pyx_L5_argtuple_error;
- CYTHON_FALLTHROUGH;
- case 1:
- if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_flags)) != 0)) kw_args--;
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- else {
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 349, __pyx_L3_error)
- }
- CYTHON_FALLTHROUGH;
- case 2:
- if (kw_args > 0) {
- PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dtype_is_object);
- if (value) { values[2] = value; kw_args--; }
- else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- }
- }
- if (unlikely(kw_args > 0)) {
- const Py_ssize_t kwd_pos_args = __pyx_nargs;
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 349, __pyx_L3_error)
- }
- } else {
- switch (__pyx_nargs) {
- case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2);
- CYTHON_FALLTHROUGH;
- case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1);
- values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0);
- break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- __pyx_v_obj = values[0];
- __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- if (values[2]) {
- __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error)
- } else {
- __pyx_v_dtype_is_object = ((int)0);
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, __pyx_nargs); __PYX_ERR(1, 349, __pyx_L3_error)
- __pyx_L3_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __Pyx_RefNannyFinishContext();
- return -1;
- __pyx_L4_argument_unpacking_done:;
- __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
- int __pyx_r;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- Py_intptr_t __pyx_t_4;
- size_t __pyx_t_5;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("__cinit__", 0);
-
- /* "View.MemoryView":350
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- * self.obj = obj # <<<<<<<<<<<<<<
- * self.flags = flags
- * if type(self) is memoryview or obj is not None:
- */
- __Pyx_INCREF(__pyx_v_obj);
- __Pyx_GIVEREF(__pyx_v_obj);
- __Pyx_GOTREF(__pyx_v_self->obj);
- __Pyx_DECREF(__pyx_v_self->obj);
- __pyx_v_self->obj = __pyx_v_obj;
-
- /* "View.MemoryView":351
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
- * self.obj = obj
- * self.flags = flags # <<<<<<<<<<<<<<
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- */
- __pyx_v_self->flags = __pyx_v_flags;
-
- /* "View.MemoryView":352
- * self.obj = obj
- * self.flags = flags
- * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- */
- __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
- if (!__pyx_t_2) {
- } else {
- __pyx_t_1 = __pyx_t_2;
- goto __pyx_L4_bool_binop_done;
- }
- __pyx_t_2 = (__pyx_v_obj != Py_None);
- __pyx_t_1 = __pyx_t_2;
- __pyx_L4_bool_binop_done:;
- if (__pyx_t_1) {
-
- /* "View.MemoryView":353
- * self.flags = flags
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None
- */
- __pyx_t_3 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 353, __pyx_L1_error)
-
- /* "View.MemoryView":354
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL: # <<<<<<<<<<<<<<
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None)
- */
- __pyx_t_1 = (((PyObject *)__pyx_v_self->view.obj) == NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":355
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
- * Py_INCREF(Py_None)
- *
- */
- ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
-
- /* "View.MemoryView":356
- * if self.view.obj == NULL:
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- */
- Py_INCREF(Py_None);
-
- /* "View.MemoryView":354
- * if type(self) is memoryview or obj is not None:
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL: # <<<<<<<<<<<<<<
- * (<__pyx_buffer *> &self.view).obj = Py_None
- * Py_INCREF(Py_None)
- */
- }
-
- /* "View.MemoryView":352
- * self.obj = obj
- * self.flags = flags
- * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
- * __Pyx_GetBuffer(obj, &self.view, flags)
- * if self.view.obj == NULL:
- */
- }
-
- /* "View.MemoryView":358
- * Py_INCREF(Py_None)
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<<
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- */
- __pyx_t_1 = (!__PYX_CYTHON_ATOMICS_ENABLED());
- if (__pyx_t_1) {
-
- /* "View.MemoryView":360
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<<
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- */
- __pyx_t_1 = (__pyx_memoryview_thread_locks_used < 8);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":361
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL:
- */
- __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
-
- /* "View.MemoryView":362
- * if __pyx_memoryview_thread_locks_used < 8:
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- */
- __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
-
- /* "View.MemoryView":360
- * if not __PYX_CYTHON_ATOMICS_ENABLED():
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<<
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- */
- }
-
- /* "View.MemoryView":363
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- */
- __pyx_t_1 = (__pyx_v_self->lock == NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":364
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
- * if self.lock is NULL:
- * raise MemoryError
- */
- __pyx_v_self->lock = PyThread_allocate_lock();
-
- /* "View.MemoryView":365
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- *
- */
- __pyx_t_1 = (__pyx_v_self->lock == NULL);
- if (unlikely(__pyx_t_1)) {
-
- /* "View.MemoryView":366
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- * raise MemoryError # <<<<<<<<<<<<<<
- *
- * if flags & PyBUF_FORMAT:
- */
- PyErr_NoMemory(); __PYX_ERR(1, 366, __pyx_L1_error)
-
- /* "View.MemoryView":365
- * if self.lock is NULL:
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * raise MemoryError
- *
- */
- }
-
- /* "View.MemoryView":363
- * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
- * __pyx_memoryview_thread_locks_used += 1
- * if self.lock is NULL: # <<<<<<<<<<<<<<
- * self.lock = PyThread_allocate_lock()
- * if self.lock is NULL:
- */
- }
-
- /* "View.MemoryView":358
- * Py_INCREF(Py_None)
- *
- * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<<
- * global __pyx_memoryview_thread_locks_used
- * if __pyx_memoryview_thread_locks_used < 8:
- */
- }
-
- /* "View.MemoryView":368
- * raise MemoryError
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- */
- __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":369
- *
- * if flags & PyBUF_FORMAT:
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
- * else:
- * self.dtype_is_object = dtype_is_object
- */
- __pyx_t_2 = ((__pyx_v_self->view.format[0]) == 'O');
- if (__pyx_t_2) {
- } else {
- __pyx_t_1 = __pyx_t_2;
- goto __pyx_L12_bool_binop_done;
- }
- __pyx_t_2 = ((__pyx_v_self->view.format[1]) == '\x00');
- __pyx_t_1 = __pyx_t_2;
- __pyx_L12_bool_binop_done:;
- __pyx_v_self->dtype_is_object = __pyx_t_1;
-
- /* "View.MemoryView":368
- * raise MemoryError
- *
- * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- */
- goto __pyx_L11;
- }
-
- /* "View.MemoryView":371
- * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
- * else:
- * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
- */
- /*else*/ {
- __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
- }
- __pyx_L11:;
-
- /* "View.MemoryView":373
- * self.dtype_is_object = dtype_is_object
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 # <<<<<<<<<<<<<<
- * self.typeinfo = NULL
- *
- */
- #ifndef CYTHON_WITHOUT_ASSERTIONS
- if (unlikely(__pyx_assertions_enabled())) {
- __pyx_t_4 = ((Py_intptr_t)((void *)(&__pyx_v_self->acquisition_count)));
- __pyx_t_5 = (sizeof(__pyx_atomic_int_type));
- if (unlikely(__pyx_t_5 == 0)) {
- PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
- __PYX_ERR(1, 373, __pyx_L1_error)
- }
- __pyx_t_1 = ((__pyx_t_4 % __pyx_t_5) == 0);
- if (unlikely(!__pyx_t_1)) {
- __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0);
- __PYX_ERR(1, 373, __pyx_L1_error)
- }
- }
- #else
- if ((1)); else __PYX_ERR(1, 373, __pyx_L1_error)
- #endif
-
- /* "View.MemoryView":374
- *
- * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0
- * self.typeinfo = NULL # <<<<<<<<<<<<<<
- *
- * def __dealloc__(memoryview self):
- */
- __pyx_v_self->typeinfo = NULL;
-
- /* "View.MemoryView":349
- * cdef __Pyx_TypeInfo *typeinfo
- *
- * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
- * self.obj = obj
- * self.flags = flags
- */
-
- /* function exit code */
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "View.MemoryView":376
- * self.typeinfo = NULL
- *
- * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
- * if self.obj is not None:
- * __Pyx_ReleaseBuffer(&self.view)
- */
-
- /* Python wrapper */
- /* Cython-generated tp_dealloc wrapper for memoryview.__dealloc__: it only
-  * casts `self` to the concrete struct type and forwards to the
-  * implementation function below.  __dealloc__ cannot raise, so there is no
-  * error-return path here.  NOTE(review): generated C inside a diff hunk —
-  * regenerate from the .pyx instead of hand-editing. */
- static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
- static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
- /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
-  * presumably they are consumed (or discarded) by the __Pyx_KwValues_VARARGS
-  * macro depending on the METH_FASTCALL configuration — confirm against the
-  * generated prelude. */
- CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
- __Pyx_RefNannyDeclarations
- __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
- __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- }
-
- /* Implementation of memoryview.__dealloc__ (generated from View.MemoryView:376-395).
-  * Two responsibilities, both visible below:
-  *   1. Buffer release: if self.obj is a real exporter, call __Pyx_ReleaseBuffer
-  *      on self.view; otherwise, if view.obj holds the Py_None placeholder,
-  *      clear the slot and drop that reference by hand.
-  *   2. Lock recycling: if self.lock was taken from the global
-  *      __pyx_memoryview_thread_locks cache, return it to the cache
-  *      (swap-remove); otherwise free it with PyThread_free_lock.
-  * NOTE(review): machine-generated C captured inside a diff hunk; change the
-  * .pyx source and regenerate rather than editing this by hand. */
- static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
- int __pyx_v_i;
- __Pyx_RefNannyDeclarations
- int __pyx_t_1;
- int __pyx_t_2;
- int __pyx_t_3;
- int __pyx_t_4;
- PyThread_type_lock __pyx_t_5;
- PyThread_type_lock __pyx_t_6;
- __Pyx_RefNannySetupContext("__dealloc__", 0);
-
- /* "View.MemoryView":377
-  *
-  * def __dealloc__(memoryview self):
-  * if self.obj is not None: # <<<<<<<<<<<<<<
-  * __Pyx_ReleaseBuffer(&self.view)
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None:
-  */
- __pyx_t_1 = (__pyx_v_self->obj != Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":378
-  * def __dealloc__(memoryview self):
-  * if self.obj is not None:
-  * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None:
-  *
-  */
- __Pyx_ReleaseBuffer((&__pyx_v_self->view));
-
- /* "View.MemoryView":377
-  *
-  * def __dealloc__(memoryview self):
-  * if self.obj is not None: # <<<<<<<<<<<<<<
-  * __Pyx_ReleaseBuffer(&self.view)
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None:
-  */
- goto __pyx_L3;
- }
-
- /* "View.MemoryView":379
-  * if self.obj is not None:
-  * __Pyx_ReleaseBuffer(&self.view)
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
-  *
-  * (<__pyx_buffer *> &self.view).obj = NULL
-  */
- __pyx_t_1 = (((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":381
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None:
-  *
-  * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
-  * Py_DECREF(Py_None)
-  *
-  */
- /* view.obj holds an owned reference to Py_None; NULL the slot first so the
-  * buffer machinery never treats it as an exporter, then drop the reference
-  * manually. */
- ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
-
- /* "View.MemoryView":382
-  *
-  * (<__pyx_buffer *> &self.view).obj = NULL
-  * Py_DECREF(Py_None) # <<<<<<<<<<<<<<
-  *
-  * cdef int i
-  */
- Py_DECREF(Py_None);
-
- /* "View.MemoryView":379
-  * if self.obj is not None:
-  * __Pyx_ReleaseBuffer(&self.view)
-  * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
-  *
-  * (<__pyx_buffer *> &self.view).obj = NULL
-  */
- }
- __pyx_L3:;
-
- /* "View.MemoryView":386
-  * cdef int i
-  * global __pyx_memoryview_thread_locks_used
-  * if self.lock != NULL: # <<<<<<<<<<<<<<
-  * for i in range(__pyx_memoryview_thread_locks_used):
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  */
- __pyx_t_1 = (__pyx_v_self->lock != NULL);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":387
-  * global __pyx_memoryview_thread_locks_used
-  * if self.lock != NULL:
-  * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  * __pyx_memoryview_thread_locks_used -= 1
-  */
- __pyx_t_2 = __pyx_memoryview_thread_locks_used;
- __pyx_t_3 = __pyx_t_2;
- for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
- __pyx_v_i = __pyx_t_4;
-
- /* "View.MemoryView":388
-  * if self.lock != NULL:
-  * for i in range(__pyx_memoryview_thread_locks_used):
-  * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
-  * __pyx_memoryview_thread_locks_used -= 1
-  * if i != __pyx_memoryview_thread_locks_used:
-  */
- __pyx_t_1 = ((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":389
-  * for i in range(__pyx_memoryview_thread_locks_used):
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
-  * if i != __pyx_memoryview_thread_locks_used:
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
-  */
- __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
-
- /* "View.MemoryView":390
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  * __pyx_memoryview_thread_locks_used -= 1
-  * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
-  * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
-  */
- __pyx_t_1 = (__pyx_v_i != __pyx_memoryview_thread_locks_used);
- if (__pyx_t_1) {
-
- /* "View.MemoryView":392
-  * if i != __pyx_memoryview_thread_locks_used:
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
-  * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
-  * break
-  * else:
-  */
- /* swap-with-last removal keeps the in-use portion of the lock cache
-  * contiguous after shrinking it by one */
- __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
- __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
-
- /* "View.MemoryView":391
-  * __pyx_memoryview_thread_locks_used -= 1
-  * if i != __pyx_memoryview_thread_locks_used:
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
-  * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
-  * break
-  */
- (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5;
- (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6;
-
- /* "View.MemoryView":390
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  * __pyx_memoryview_thread_locks_used -= 1
-  * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
-  * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
-  */
- }
-
- /* "View.MemoryView":393
-  * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
-  * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
-  * break # <<<<<<<<<<<<<<
-  * else:
-  * PyThread_free_lock(self.lock)
-  */
- goto __pyx_L6_break;
-
- /* "View.MemoryView":388
-  * if self.lock != NULL:
-  * for i in range(__pyx_memoryview_thread_locks_used):
-  * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
-  * __pyx_memoryview_thread_locks_used -= 1
-  * if i != __pyx_memoryview_thread_locks_used:
-  */
- }
- }
- /* Python for/else: reached only when the loop finished without `break`,
-  * i.e. the lock was not found in the cache, so it must be freed outright. */
- /*else*/ {
-
- /* "View.MemoryView":395
-  * break
-  * else:
-  * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
-  *
-  * cdef char *get_item_pointer(memoryview self, object index) except NULL:
-  */
- PyThread_free_lock(__pyx_v_self->lock);
- }
- __pyx_L6_break:;
-
- /* "View.MemoryView":386
-  * cdef int i
-  * global __pyx_memoryview_thread_locks_used
-  * if self.lock != NULL: # <<<<<<<<<<<<<<
-  * for i in range(__pyx_memoryview_thread_locks_used):
-  * if __pyx_memoryview_thread_locks[i] is self.lock:
-  */
- }
-
- /* "View.MemoryView":376
-  * self.typeinfo = NULL
-  *
-  * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
-  * if self.obj is not None:
-  * __Pyx_ReleaseBuffer(&self.view)
-  */
-
- /* function exit code */
- __Pyx_RefNannyFinishContext();
- }
-
-/* "View.MemoryView":397
- * PyThread_free_lock(self.lock)
- *
- * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
- * cdef Py_ssize_t dim
- * cdef char *itemp = self.view.buf
- */
-
-static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
- Py_ssize_t __pyx_v_dim;
- char *__pyx_v_itemp;
- PyObject *__pyx_v_idx = NULL;
- char *__pyx_r;
- __Pyx_RefNannyDeclarations
- Py_ssize_t __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- Py_ssize_t __pyx_t_3;
- PyObject *(*__pyx_t_4)(PyObject *);
- PyObject *__pyx_t_5 = NULL;
- Py_ssize_t __pyx_t_6;
- char *__pyx_t_7;
- int __pyx_lineno = 0;
- const char *__pyx_filename = NULL;
- int __pyx_clineno = 0;
- __Pyx_RefNannySetupContext("get_item_pointer", 0);
-
- /* "View.MemoryView":399
- * cdef char *get_item_pointer(memoryview self, object index) except NULL:
- * cdef Py_ssize_t dim
- * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<<
- *
- * for dim, idx in enumerate(index):
- */
- __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
-
- /* "View.MemoryView":401
- * cdef char *itemp = self.view.buf
- *
- * for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- */
- __pyx_t_1 = 0;
- if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
- __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
- __pyx_t_4 = NULL;
- } else {
- __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error)
- }
- for (;;) {
- if (likely(!__pyx_t_4)) {
- if (likely(PyList_CheckExact(__pyx_t_2))) {
- if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- } else {
- if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
- __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error)
- #else
- __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error)
- __Pyx_GOTREF(__pyx_t_5);
- #endif
- }
- } else {
- __pyx_t_5 = __pyx_t_4(__pyx_t_2);
- if (unlikely(!__pyx_t_5)) {
- PyObject* exc_type = PyErr_Occurred();
- if (exc_type) {
- if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
- else __PYX_ERR(1, 401, __pyx_L1_error)
- }
- break;
- }
- __Pyx_GOTREF(__pyx_t_5);
- }
- __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
- __pyx_t_5 = 0;
- __pyx_v_dim = __pyx_t_1;
- __pyx_t_1 = (__pyx_t_1 + 1);
-
- /* "View.MemoryView":402
- *
- * for dim, idx in enumerate(index):
- * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
- *
- * return itemp
- */
- __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 402, __pyx_L1_error)
- __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 402, __pyx_L1_error)
- __pyx_v_itemp = __pyx_t_7;
-
- /* "View.MemoryView":401
- * cdef char *itemp = self.view.buf
- *
- * for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- */
- }
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "View.MemoryView":404
- * itemp = pybuffer_index(&self.view, itemp, idx, dim)
- *
- * return itemp # <<<<<<<<<<<<<<
- *
- *
- */
- __pyx_r = __pyx_v_itemp;
- goto __pyx_L0;
-
- /* "View.MemoryView":397
- * PyThread_free_lock(self.lock)
- *
- * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
- * cdef Py_ssize_t dim
- * cdef char *itemp =